diff --git a/.gitattributes b/.gitattributes index 1ba7f1fd0ac26176775678e4be402247622a4e4c..7ec48f22b8f7a24fbd488c25a46615451bb4090f 100644 --- a/.gitattributes +++ b/.gitattributes @@ -1188,3 +1188,11 @@ data/2025/2504_09xxx/2504.09858/aee82fd0-741f-450f-b13e-3a2b835b4b13_origin.pdf data/2025/2504_09xxx/2504.09946/bcacb31d-9da1-45e5-a5c9-17c70dbf8404_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_10xxx/2504.10068/b12624ad-dc0b-4ee7-af9d-cbeeaaff7682_origin.pdf filter=lfs diff=lfs merge=lfs -text data/2025/2504_10xxx/2504.10081/74c741cd-0934-4aaa-b8c4-a6d4a37b4d55_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_origin.pdf filter=lfs diff=lfs merge=lfs -text +data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_origin.pdf filter=lfs diff=lfs merge=lfs -text diff --git a/data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_content_list.json b/data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..90f464cedadcf6c52428522c5d5ab5ebb65f35d5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_content_list.json @@ -0,0 +1,3218 @@ +[ + { + "type": "text", + "text": "Evaluating Machine Learning-Driven Intrusion Detection Systems in IoT: Performance and Energy Consumption", + "text_level": 1, + "bbox": [ + 80, + 63, + 882, + 112 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Saeid Jamshidi, Kawser Wazed Nafi, Amin Nikanjam, Foutse Khomh", + "bbox": [ + 80, + 124, + 645, + 142 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "SWAT, Polytechnique, Montréal, H3T 1J4, Quebec, Canada", + "bbox": [ + 80, + 155, + 406, + 168 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ARTICLEINFO", + "text_level": 1, + "bbox": [ + 80, + 187, + 225, + 201 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Keywords:", + "bbox": [ + 80, + 213, + 142, + 225 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Machine Learning, Intrusion Detection System, Energy Consumption, Software-Defined Networking, SDN-IoT", + "bbox": [ + 80, + 225, + 294, + 272 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 371, + 189, + 482, + 201 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In the landscape of network security, the integration of Machine Learning (ML)-based Intrusion Detection System (IDS) represents a significant leap forward, especially in the domain of the Internet of Things (IoT) and Software-Defined Networking (SDN). Such ML-based IDS are crucial for improving security infrastructures, and their importance is increasingly pronounced in IoT systems. 
However, despite the rapid advancement of ML-based IDS, there remains a gap in understanding their impact on critical performance metrics (e.g., CPU load, energy consumption, and CPU usage) in resource-constrained IoT devices. This becomes especially crucial in scenarios involving real-time cyber threats that challenge IoT devices in a public/private network.", + "bbox": [ + 369, + 213, + 915, + 308 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "To address this gap, this article presents an empirical study that evaluates the impact of state-of-the-art ML-based IDSs on performance metrics such as CPU usage, energy consumption, and CPU load in the absence and presence of real-time cyber threats, with a specific focus on their deployment at the edge of IoT infrastructures. We also incorporate SDN to evaluate the comparative performance of ML-based IDSs with and without SDN. To do so, we focus on the impact of both SDN's centralized control and dynamic resource management on the performance metrics of an IoT system. Finally, we analyze our findings using statistical analysis using the Analysis of Variance (ANOVA) analysis. Our findings demonstrate that traditional ML-based IDS, when implemented at the edge gateway with and without SDN architecture, significantly affects performance metrics against cyber threats compared to DL-based ones. Also, we observed substantial increases in energy consumption, CPU usage, and CPU load during real-time cyber threat scenarios at the edge, underscoring the resource-intensive nature of these systems. This research fills the existing knowledge void and delivers essential insights into the operational dynamics of ML-based IDS at edge gateway in IoT systems.", + "bbox": [ + 369, + 308, + 915, + 464 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 80, + 490, + 220, + 506 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The rapid expansion of the Internet of Things (IoT) has ushered in an era where data flows seamlessly across various sectors, driving profound changes in how devices interact [1][2]. This intricate IoT ecosystem, composed of countless devices, sensors, and intelligent nodes, has fundamentally reshaped how we think about device communication, significantly minimizing the need for human involvement [3]. The integration of Software-Defined Networking (SDN) within the IoT landscape represents a significant step forward, creating a unified IoT-SDN framework that offers centralized control, improved network management, and stronger security measures [4][5].", + "bbox": [ + 80, + 511, + 485, + 692 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The rapid expansion of IoT, driven by the interconnection of millions of devices via Wireless Sensor Networks (WSNs), presents significant challenges [6]. These challenges stem mainly from these devices' limited memory, power, and battery life, highlighting the need for optimized computing and advanced data analysis techniques [7]. 
Deploying SDN within this framework aims to overcome these obstacles by offering a streamlined, secure network infrastructure that facilitates effective resource allocation and enhanced threat", + "bbox": [ + 80, + 692, + 485, + 825 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "management.", + "bbox": [ + 509, + 490, + 606, + 503 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Given the widespread security vulnerabilities in IoT networks, such as service disruptions and unauthorized access, the importance of Machine Learning (ML)-based Intrusion Detection Systems (IDS) has grown [8]. ML-based IDS are crucial for protecting network integrity due to their ability to adapt dynamically and effectively identify threats [9][10] [11].", + "bbox": [ + 507, + 503, + 915, + 607 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "However, despite advancements in developing ML-based IDS for IoT, several critical gaps remain, as highlighted by Tekin et al. [12]. While previous research has examined ML-based IDS's performance in controlled, static testbed environments, there is a significant gap in understanding how these systems operate under the dynamic conditions of real-time cyber threats, especially when IoT is integrated with SDN. Moreover, while the potential of SDN to significantly enhance resource management in IoT systems is widely acknowledged [13][14][15], there is a lack of empirical evidence on how SDN interacts with ML-based IDS during cyber threats.", + "bbox": [ + 507, + 607, + 915, + 788 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this study, we set two primary objectives designed to deepen our understanding of network performance metrics in IoT. Firstly, we assess the impact of deploying ML-based IDS at edge gateway, mainly focusing on ML-based IDS performance metrics under real-time cyber threats. Secondly, we explore the impact of integrating SDN with our testbed, again at edge gateway, to evaluate its influence on performance metrics under similar cyber threats. The rationale behind incorporating SDN into our testbed is its", + "bbox": [ + 507, + 789, + 915, + 924 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Corresponding author", + "bbox": [ + 107, + 832, + 235, + 844 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "**Principal corresponding author", + "bbox": [ + 105, + 846, + 282, + 858 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "jamshidi.saeid@polymt1.ca,", + "bbox": [ + 112, + 860, + 337, + 873 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "kawser.wazed-nafi@polymtl.ca,", + "bbox": [ + 84, + 873, + 317, + 883 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "amin.nikanjam@polymt1.ca, foutse.khomh@polymt1.ca (S.J.K.W.N.A.N.F. Khomh)", + "bbox": [ + 82, + 886, + 478, + 907 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "ORcld(s):", + "bbox": [ + 112, + 909, + 166, + 920 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 80, + 953, + 415, + 967 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "Page 1 of 21", + "bbox": [ + 826, + 952, + 912, + 967 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "potential to improve resource management in IoT systems significantly [16][17]. 
We conduct a comparative analysis of the performance of seven state-of-the-art ML-based IDSs in two distinct setups: firstly, at the edge gateway, and secondly, in a similar setup augmented with SDN integration at the edge gateway, all under real-time cyber threats. This analysis is designed to elucidate the impact of SDN on performance metrics and resource management in IoT systems, especially highlighting how SDN integration can optimize the operational efficiency and resilience of IoT networks against the backdrop of evolving cyber threats. To summarize, this paper makes the following contributions:", + "bbox": [ + 82, + 69, + 485, + 248 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Assessing performance metrics of ML-based IDS in IoT systems under real-time cyber threats: Our investigation revealed the significant impact of seven ML-based IDS on the performance at the edge, specifically measuring CPU usage, CPU load, and energy consumption amidst cyber threats. Utilizing ANOVA, we clarify the operational consequences of deploying these sophisticated IDSs on the edge.", + "- Evaluating the impact of ML-based IDS at edge integrated with SDN: we evaluated the performance metrics of seven ML-based IDS at the edge gateway system integrated with SDN. Utilizing ANOVA, we clarify the impact of the integrated SDN with IoT on deploying these sophisticated IDS under real-time cyber threats.", + "- Proposing a plugin-based ML-based IDS test suite: This test suite comes with a group of available datasets and available ML-based IDSs and allows the users to define their own IoT and SDN applications and test their ML-based IDSs and models in terms of detection accuracy and performance metrics. Researchers can efficiently perform comparative analyses for their algorithms and models with other available algorithms and models. The test suite is publicly available (section 8) for researchers and practitioners to reuse." + ], + "bbox": [ + 112, + 259, + 487, + 654 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "The remainder of this paper is organized as follows: Section 2 discusses the review of our research literature. Section 3 discusses the necessary background knowledge. In Section 4, we describe the experimental design, the Research Questions (RQs), and the metrics of the experiments. Section 5 explains our results and findings. Section 8 discusses threats to the validity of our study. Finally, Section 9 concludes the paper and outlines future work.", + "bbox": [ + 82, + 665, + 485, + 784 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Works", + "text_level": 1, + "bbox": [ + 84, + 807, + 236, + 823 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Understanding the performance trade-offs of ML-based IDS in IoT, especially in resource-constrained edge gateways, remains an open challenge. While numerous studies, as mentioned in the previous section, have focused on detection accuracy, limited research has analyzed their real-time computational impact. In particular, there is a significant gap in understanding how ML-based IDS operate under real-time", + "bbox": [ + 84, + 829, + 485, + 934 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "cyber threats, especially when integrated with SDN. 
This section reviews prior works on ML-based IDS in IoT and SDN, examining their strengths and limitations and focusing on ML models and energy consumption concerns.", + "bbox": [ + 512, + 69, + 914, + 129 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1. IoT Intrusion Detection", + "text_level": 1, + "bbox": [ + 512, + 142, + 739, + 156 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Alsulami et al. [18] proposed a new ML model to identify and categorize network activity in IoT systems. Their research aimed to classify network traffic into distinct categories, including normal behavior and various types of attacks (e.g., Mirai, Denial-of-Service (DoS), Scan, and Man-in-the-Middle (MITM)). The study involved testing several supervised learning models on the customized IoTID20 dataset, including Spiking Neural Networks (SNNs), DT, Boosting Trees (BT), Support Vector Machines (SVM), and KNN. These models, enhanced through deep feature engineering, effectively identified and classified network anomalies.", + "bbox": [ + 512, + 158, + 914, + 336 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Mukherjee et al. [19] conducted an in-depth investigation into the predictive capabilities of supervised learning models (e.g., Logistic Regression (LR), Naïve Bayes (NB), DT, RF, and Artificial Neural Network (ANN)) for anomaly detection. Their study utilized a dataset comprising 350,000 data points. The research compared these models against established state-of-the-art techniques, including BIRCH clustering and K-Means, and evaluated their performance in different scenarios. This included an analysis using the complete dataset and a separate evaluation after removing binary data points in the 'value' feature. The models demonstrated high precision in both scenarios, underscoring their efficacy in practical anomaly forecasting and enhancing security measures against potential risks.", + "bbox": [ + 512, + 337, + 914, + 548 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Elnakib et al. [20] proposed the Enhanced Intrusion Detection Deep Learning Multi-class Classification Model (EIDM), a sophisticated Deep Learning (DL) model designed to enhance security in the IoT context. This model is adept at accurately categorizing 15 distinct traffic characteristics, encompassing a range of 14 discrete attack types. The performance of EIDM was evaluated against four other contemporary models, focusing on classification accuracy and efficiency. The increased precision of EIDM highlights its promise as a powerful solution for safeguarding IoT networks against a wide range of attacks.", + "bbox": [ + 512, + 549, + 914, + 713 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Douiba et al. [21] proposed an innovative IDS to enhance IoT device security. Their approach utilized gradient boosting and DT in the Catboost framework. The model's performance was rigorously assessed on several datasets, including NSL-KDD, IoT-23, BoT-IoT, and Edge-IIoT, with optimization achieved through GPU acceleration. The IDS distinguished itself with its ability to detect anomalies in real-time and its computing efficiency, demonstrating high accuracy, recall, and precision metrics, around $99.9\\%$ on a record detection and computation time.", + "bbox": [ + 512, + 713, + 914, + 863 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Kasongo et al. 
[22] presented a research endeavor in which they proposed a Feed-Forward Deep Neural Network (FFDNN) IDS, enhanced by the inclusion of a Wrapper Feature Extraction Unit (WFEU) utilizing the Extra Trees", + "bbox": [ + 512, + 865, + 914, + 924 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 1 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 84, + 953, + 415, + 965 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "Page 2 of 21", + "bbox": [ + 826, + 953, + 912, + 965 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "algorithm. The WFEU-FFDNN was evaluated for its performance on several datasets, including UNSW-NB15 and AWID, and compared with traditional ML methods. The system demonstrated high classification accuracies in binary and multiclass classifications across these datasets, significantly outperforming in scenarios involving the AWID dataset. The enhanced precision of the WFEU-FFDNN model emphasizes its efficacy in real-time anomaly detection and computing efficiency.", + "bbox": [ + 82, + 68, + 485, + 204 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In addition to all of the works stated above, Verma et al. [23] examined ML algorithms in the context of augmenting security measures in the IoT. The researchers compared classifiers using benchmark datasets (e.g., CIDDS-001, UNSW-NB15, and NSL-KDD). This analysis was supported by statistical tests, namely the Friedman and Nemenyi tests. The researchers also evaluated the reaction times on the Raspberry Pi platform, showcasing the adaptability and efficiency of the classifiers in IoT scenarios, hence emphasizing their practical relevance.", + "bbox": [ + 82, + 205, + 485, + 354 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Otoum et al. [24] presented a scholarly investigation in which they propose a DL-powered intrusion detection system (DL-based IDS) to effectively address challenges associated with feature learning and dataset management. The DL-based IDS developed by the researchers integrates the Spider Monkey Optimization(SMO) algorithm with the stacked-deep polynomial network (SDPN) to enhance threat identification. The system can detect various abnormalities, including DoS, User to Root attacks (U2R), probing, and Root-to-local attacks (R2L). The DL-based IDS was evaluated using the NSL-KDD dataset and exhibited outstanding performance metrics, showcasing its efficacy in various aspects of threat detection.", + "bbox": [ + 82, + 355, + 485, + 548 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Gaber et al. [25] highlight securing IoT systems, especially in complex environments ( e.g., smart cities). The authors introduced a feature selection methodology that combines constant removal and recursive feature elimination strategies. They utilized a DT classifier with a subset of 8 characteristics, assessed on the AWID dataset using various ML classifiers. In contrast to existing methods, their approach exhibited exceptional performance, achieving high accuracy, precision, and F1 score rates. These results underscore the potential of their methodology in the domain of IoT-IDS.", + "bbox": [ + 82, + 549, + 485, + 699 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Sachdeva et al. 
[26] investigate the issue of fortifying cybersecurity in IoT networks to mitigate the impact of distributed denial-of-service (DDoS) attacks. The authors put out an innovative approach for data pre-processing, which involves the integration of ML and DL classifiers. The class imbalances in the BOT-IoT and TON-IoT datasets from UNSW Australia are mitigated using several Synthetic Minority Oversampling Technique (SMOTE) variants. The hybrid methodology employed in this study, which integrates many algorithms, demonstrates the promising prospects for efficient detection of DDoS attacks in IoT networks.", + "bbox": [ + 82, + 701, + 485, + 863 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The related works discussed above show that the most ML-based IDS developed and re-used by researchers are DT, KNN, RF, LSTM, CNN, and a hybrid model of CNN and", + "bbox": [ + 84, + 881, + 484, + 924 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "LSTM. In addition, EIDM is the most recent work that has overcome the limitations of the previous ML models. That is why we proceed with all these six ML-based IDS to carry out our study in this paper.", + "bbox": [ + 512, + 69, + 912, + 129 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2. Energy consumption in IDS", + "text_level": 1, + "bbox": [ + 512, + 141, + 769, + 156 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Only a tiny amount of research has been done so far to determine the energy consumption in IDS. Among them, Tekin et al. [12] investigated the topic of IDS in the context of the IoT, with a specific focus on the energy consumption aspect in devices with limitations. The authors assessed various ML paradigms in the context of cloud computing, edge computing, and IoT devices. They specifically emphasize the promising capabilities of TinyML for microcontroller units (MCUs). DT algorithm demonstrates in terms of training, inference, and power efficiency. Although Naive Bayes (NB) has superior training speed, it exhibits a minor accuracy trade-off requirements of the KNN algorithm increase proportionally with the quantity of the dataset, hence diminishing its suitability for deployment in IoT systems. Both DT and RF exhibit low power consumption and high accuracy. However, it is essential to consider that RF's longer execution time represents a trade-off. The research findings also elucidate the advantages and constraints of cloud-based ML, underscoring the significance of algorithm choice in practical implementations.", + "bbox": [ + 512, + 157, + 914, + 456 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Nimmy et al. [27] utilize the energy consumption patterns of IoT devices to identify irregularities in smart home environments. They developed a prototype of a smart camera based on Raspberry Pi to gather power traces during regular operations and simulated DDoS attacks. This approach emphasizes the importance of energy consumption as a crucial indicator of aberrant behaviors. The deep feedforward neural network used in their study demon- strates exceptional performance in identifying anomalies, as evidenced by rigorous evaluations of ML models. This indicates its potential to enhance the security of smart homes significantly.", + "bbox": [ + 512, + 457, + 912, + 636 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.3. 
IoT Intrusion Detection in SDN", + "text_level": 1, + "bbox": [ + 512, + 650, + 801, + 663 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Chaganti et al. [28] present a sophisticated IDS for IoT networks. This system leverages SDN and specifically emphasizes the utilization of DL techniques. The research is for its utilization of LSTM networks, a Recurrent Neural Network (RNN) type renowned for its efficacy in handling time series data, which is critical in detecting network threats. The authors' principal contribution is utilizing an LSTM model, which they employ to discern network attacks. To evaluate the efficacy of their approach, the authors conduct a comparative analysis with alternative architectures(e.g., SVM). The experimental findings present solid evidence that highlights the improved efficacy of the LSTM model in accurately categorizing various network attacks. The LSTM model demonstrated exceptional accuracy and efficiency in detecting attack patterns, surpassing conventional ML models in precision and recall metrics.", + "bbox": [ + 512, + 665, + 914, + 902 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "M. M. Isa et al. [29] present the DAERF model in their research, an innovative IDS for SDN. This model combines", + "bbox": [ + 512, + 905, + 912, + 933 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 2 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 84, + 953, + 415, + 965 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "Page 3 of 21", + "bbox": [ + 826, + 953, + 912, + 965 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "a Deep Autoencoder (DAE) with an RF algorithm, creating a unique approach. The DAE excels in feature extraction and data dimensionality reduction. At the same time, the RF approach, known for using an ensemble of DTs, shows significant accuracy and robustness in classification tasks. The DAERF model was evaluated in a simulated SDN using commonly used datasets, demonstrating a high efficacy level. The integration of DL and ML in the DAERF model represents a novel approach that effectively identifies and categorizes network intrusions, enhancing the security of SDN systems and ensuring their capability to handle real-time applications with scalability and adaptability.", + "bbox": [ + 82, + 69, + 484, + 248 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Phan The Duy et al. [30] presented 'FoolYE,' an innovative IDS designed specifically for SDN systems. The system combines cyber deception techniques with Moving Target Defense (MTD) methodologies. The core of this methodology lies in its ability to create a dynamic and misleading network environment, making it challenging for malicious actors to identify and exploit genuine resources. A key innovation is deep transfer learning-based IDS, which employs advanced DL models (e.g., ResNet50 and DenseNet161), originally designed for image recognition. These models have been adapted using deep transfer learning techniques to analyze network traffic for ML-based IDS, demonstrating the versatility and efficacy of DL in cybersecurity. 
The study involved experiments in simulated SDN systems, where the performance of the IDS was thoroughly examined, showing its high capability in accurately detecting a wide range of network intrusions.", + "bbox": [ + 82, + 250, + 484, + 503 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Despite advancements in ML-based IDS for IoT, a significant gap remains in understanding their real-time computational impact, especially in energy consumption, CPU load, and CPU usage at the edge gateway. This gap is further compounded by the lack of empirical studies evaluating the effectiveness and efficiency of ML-based IDS in real-world, resource-constrained edge gateway, especially when integrated with SDN during cyber threats. To address these shortcomings, our study provides a comprehensive empirical analysis of ML-based IDS, focusing on their performance trade-offs in SDN-enabled and non-SDN edge gateways. Specifically, we assess how different ML-based IDS models impact system resources under real-time cyber threats, offering critical insights into their feasibility for deployment in IoT networks.", + "bbox": [ + 82, + 505, + 484, + 728 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3. Background", + "text_level": 1, + "bbox": [ + 84, + 747, + 216, + 764 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This section dives into the underlying premise of the research's baselines.", + "bbox": [ + 84, + 769, + 482, + 796 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Decision Tree (DT): In the field of IDS, DT is a key ML method for analyzing network data. They use trees, e.g., models, to break down network features into binary decisions, evaluating network attributes at each node to identify effective splits. This creates a rule-based hierarchy that excels at spotting differences between normal and suspicious network activities. DTs are valued for their clarity and ease of interpretation, playing a vital in improving cybersecurity by identifying unusual or unauthorized actions", + "bbox": [ + 82, + 799, + 484, + 934 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "[31] [32].", + "bbox": [ + 512, + 69, + 576, + 81 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Random Forest (RF): The algorithm is highly valued in IDS for its precision in classifying network data. Utilizing RF, an ML algorithm, it creates a group of DT to assess various network attributes, effectively distinguishing between normal and malicious activities. RF excels in managing large datasets, balancing IDS data disparities, and minimizing overfitting, making IoT and network security crucial. It achieves accurate detection of unusual network behaviors [33] [34].", + "bbox": [ + 512, + 84, + 914, + 218 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "K-Nearest Neighbor (KNN): The KNN algorithm is a key IDS tool known for its effective similarity-based classification. It compares network traffic with existing labeled data using distance metrics to classify new instances, with 'k' indicating the number of neighbors considered. This method is crucial for identifying normal versus abnormal network activities, offering a simple yet versatile solution for real-time IDS. KNN excels in both binary and multiclass problems, providing quick, reliable categorizations crucial for responding to threats in dynamic networks [35] [36] [37]. 
Long short-term memory (LSTM): LSTM networks, a type of recurrent neural network, are highly effective in analyzing sequential data for IDS. Their unique memory cells excel at identifying complex patterns in network traffic, making them adept at spotting advanced threats that traditional methods may miss. LSTMs are especially valuable for maintaining context over data sequences, which is crucial for distinguishing between normal and malicious network activities. Their application in IDS significantly boosts cybersecurity, especially in dynamic and IoT environments, by adapting to new threats and efficiently handling varying data lengths, offering a robust solution to modern cybersecurity challenges [38] [39].", + "bbox": [ + 512, + 220, + 914, + 564 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Convolutional Neural Network(CNN): CNNs provide a resilient DL methodology for IDS. CNNs are widely recognized for their ability to independently acquire hierarchical features from network traffic. This is achieved through convolutional, pooling, and fully connected layers, which enable the discernment of spatial patterns in the traffic data. This capacity facilitates the recognition of both well-established and new threats. CNN in IDS is considered crucial in enhancing cybersecurity defenses against a wide range of cyber threats due to their capacity to scale effectively and efficiently handle real-time data [40] [41].", + "bbox": [ + 512, + 564, + 914, + 728 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Hybrid model of LSTM and CNN: The integration of LSTM and CNN models into IDS significantly boosts network security by combining the spatial analysis capabilities of CNNs with the temporal pattern recognition of LSTMs. This hybrid approach detects complex cyber threats by analyzing network traffic data in both spatial and temporal dimensions. CNNs effectively identify security breaches through local pattern recognition, while LSTMs track the sequence of network events over time, offering a detailed understanding of potential threats. This fusion results in more accurate and efficient detection of sophisticated, multistage attacks, reducing false positives and adapting to new threats, thereby enhancing overall anomaly detection and", + "bbox": [ + 512, + 731, + 914, + 924 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 3 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 84, + 953, + 415, + 965 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "Page 4 of 21", + "bbox": [ + 826, + 953, + 912, + 965 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "maintaining network integrity without excessive alerts [42] [43].", + "bbox": [ + 82, + 69, + 485, + 98 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "EIDM: The EIDM is a cutting-edge IDS approach expertly handling a wide range of network events. Its design combines convolutional and dense layers to tackle the challenges of class diversity and data imbalance. The model begins with a 120-node dense layer, followed by an 80-neuron convolutional layer with a kernel size of 20 to better distinguish between similar network activities. It also features a Maxpooling layer for enhanced feature extraction and a dropout layer to avoid overfitting. 
EIDM can classify 15 network behaviors through six dense layers, using 'relu' activation and SGD and Adam optimizers for optimal accuracy and efficiency. According to [20], EIDM's unique structure and optimization techniques make it a standout solution for improving network IDS.", + "bbox": [ + 82, + 99, + 485, + 308 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4. Study design", + "text_level": 1, + "bbox": [ + 84, + 331, + 220, + 349 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This section describes our methodology to evaluate the impact of specific ML-based IDSs using selected performance metrics. We first mention our Research Questions (RQs), followed by an explanation of the experimental design and the metrics used to evaluate the impact of the ML-based IDS.", + "bbox": [ + 84, + 353, + 485, + 442 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1. Research questions(RQs)", + "text_level": 1, + "bbox": [ + 84, + 456, + 319, + 472 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our research aims to address the following RQs:", + "bbox": [ + 109, + 472, + 433, + 486 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- RQ1: How do ML-based IDSs impact CPU usage, CPU load, and energy consumption at the edge gateway without SDN during real-time cyber threats?", + "bbox": [ + 112, + 496, + 485, + 556 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This RQ examines the impact of ML-based IDSs on crucial performance metrics, specifically CPU usage, CPU load, and energy consumption, at edge gateway not integrated with SDN. It focuses on analyzing the performance of seven state-of-the-art ML-based IDSs and their impacts on these key metrics in the face of diverse cyber threats.", + "bbox": [ + 122, + 557, + 485, + 662 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "- RQ2: What are the differences in CPU usage, CPU load, and energy consumption impacts of ML-based IDS at the edge gateway with SDN integration during real-time cyber threats?", + "bbox": [ + 112, + 672, + 485, + 733 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "This RQ explores how ML-based IDSs influence CPU usage, CPU load, and energy consumption at the edge gateway integrated with SDN. It involves analyzing the impacts of various ML-based IDSs on these essential performance metrics under various cyber threats.", + "bbox": [ + 122, + 733, + 485, + 808 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2. DataSet", + "text_level": 1, + "bbox": [ + 84, + 820, + 184, + 834 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In our study, we used the CICIDS2017 data set [44], a highly regarded resource organized by the Canadian Institute for Cybersecurity. This dataset is recognized as one of the gold standards in cybersecurity research, capturing a broad spectrum of benign network activities and the latest cyberattacks [45]. CICIDS2017 is designed to simulate", + "bbox": [ + 84, + 838, + 485, + 926 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/2a90fde578b9f825ce59035599dea091ebd2cf5ed5be137d79b307d09e9cfb2d.jpg", + "table_caption": [ + "Table 1 Distribution of labeled IoT-SDN attacks in the dataset" + ], + "table_footnote": [], + "table_body": "
IoT Attack Labels | No of labeled entries
BENIGN | 2271320
DoS Hulk | 230124
Port Scan | 158804
DDoS | 128025
DoS GoldenEye | 10293
FTP-Patator | 7935
SSH-Patator | 5897
DoS slowloris | 5796
DoS Slowhttptest | 5499
Bot | 1956
Web Attack & Brute Force | 1507
Web Attack & XSS | 652
Infiltration | 36
Web Attack & SQL Injection | 21
Heartbleed | 11
", + "bbox": [ + 521, + 102, + 900, + 332 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "real-world network environments, making it an essential resource for researchers to test and validate advanced IDS thoroughly. The breadth and diversity of the asset highlight its importance, making it necessary for those aiming to strengthen network security paradigms.", + "bbox": [ + 512, + 359, + 912, + 433 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. The ML-based IDS", + "text_level": 1, + "bbox": [ + 512, + 447, + 702, + 461 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Numerous ML-based IDS have been developed by researchers [12] [22] [25] [46]. However, we had a significant challenge in reviewing these publications and selecting some for our study. Most did not make their solutions' applications or source code publicly available. This lack of transparency hinders the ability to experiment with these works in real IoT devices. This omission complicates, and may even prevent, the objective comparison of the proposed solutions. Consequently, to initiate our study, it became necessary to independently implement all ML-based IDS that have been previously utilized, except the ML-based IDS proposed by [20], which shared their code ML-based IDS available to researchers. In this section, we explore the implementation process of seven ML-based IDSs that we have developed: DT, KNN, RF, LSTM, CNN, and a hybrid model of LSTM and CNN. Table 3 presents a comparative analysis of the performance metrics of ML-based IDS.", + "bbox": [ + 512, + 462, + 914, + 717 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3.1. DT, KNN, RF", + "text_level": 1, + "bbox": [ + 512, + 731, + 663, + 745 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We have developed and deployed DT-based IDS, RF-based IDS, and KNN-based IDS [47], each specifically designed to improve security policy. The foundation of these models is a preprocessing technique applied to the selected CICIDS 2017 dataset. The dataset features various simulated cyber-attack scenarios alongside standard traffic data. It encompasses multiple numerical attributes, including but not limited to packet sizes, flow durations, and bytes per flow, which are critical for analyzing network behavior and detecting anomalies. We applied min-max normalization as our initial preprocessing step to ensure uniformity across these diverse numerical attributes and", + "bbox": [ + 512, + 747, + 914, + 925 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 4 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 84, + 953, + 415, + 965 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "Page 5 of 21", + "bbox": [ + 826, + 953, + 912, + 965 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/69d318352c9083ca1c5ac983dc98578e751733e24bc8f5dcb0a365155d70ef36.jpg", + "table_caption": [ + "Table 6 Comparison of structure and accuracy of different Neural Network models in IDS for IoT-SDN network" + ], + "table_footnote": [], + "table_body": "
Dataset | CICIDS2017 | CICIDS2017 | CICIDS2017 | CICIDS2017
Categories | 15 | 15 | 15 | 15
Model | LSTM | LSTM+CNN | CNN | EIDM
Layers | 10 | 11 | 8 | 12
Parameters5638612795349748735
Structure details:
LSTM: Dense (64), Dense (128), LSTM (128), LSTM (256), Dense (128), Dense (48), Dense (15)
LSTM+CNN: Dense (64), Conv1D (64, 10), Conv1D (64, 10), MaxPooling1D (2), LSTM (128), LSTM (64), Dense (64), Dense (15)
CNN: Conv1D (16, 30), Conv1D (16, 30), MaxPooling1D (2), Flatten(), Dense (32), Dense (15)
EIDM: Dense (120), Conv1D (80, 20), MaxPooling1D (2), Dense (120), Dense (100), Dense (80), Dense (60), Dense (60), Dense (40), Dense (15)
Training Accuracy (%) | 97.72% | 98.77% | 97.92% | 99.57%
Testing Accuracy (%) | 93.86% | 95.75% | 94.74% | 99.56%
", + "bbox": [ + 176, + 104, + 818, + 375 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "mitigate scale discrepancies. Missing values were imputed to preserve the integrity of the data. The LabelEncoder[48] was utilized to convert labels into a format suitable for ML techniques. An essential aspect of our methodology is to divide the selected dataset into training and testing subsets. For the first RQ, we adopted $80\\%$ training and $20\\%$ testing, aligning with standard practices in ML model development. This adjustment was made to accommodate the different requirements of each research phase. As shown in Table 1, the dataset has five classes (Benign, DDoS, DoS, Brute force, and Port scan) with significantly more entries than the remaining ten classes, which contain fewer samples. SMOTE [49] with auto-sampling was employed to address the class imbalance issue in the dataset. This technique effectively augmented the representation of underrepresented classes, leading to a more balanced dataset for training purposes.", + "bbox": [ + 78, + 399, + 485, + 641 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.2. CNN", + "text_level": 1, + "bbox": [ + 80, + 651, + 173, + 665 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In our research, we deployed a CNN-based IDS tailored for our experimental testbed. The configuration details of the CNN model, including its layers, parameters, and architecture specifics, are outlined in Table 2.", + "bbox": [ + 78, + 667, + 487, + 728 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.3. LSTM", + "text_level": 1, + "bbox": [ + 80, + 738, + 184, + 754 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In our investigation, we implemented an LSTM-based IDS specifically for our testbeds. The detailed architecture and parameters of the LSTM model, crucial for its operation in our IDS, are thoroughly presented in Table 2.", + "bbox": [ + 78, + 755, + 487, + 815 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3.4. Hybrid model of LSTM and CNN", + "text_level": 1, + "bbox": [ + 80, + 827, + 378, + 843 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In our exploration, we implemented a hybrid LSTM and CNN architectures model to create an advanced IDS tailored to our experimental setup. This architecture has already been tested in various scenarios [50][51][43]. The intricate configuration of this hybrid LSTM and CNN model, which leverages the strengths of both LSTM and CNN to enhance", + "bbox": [ + 78, + 843, + 487, + 934 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "detection capabilities, is detailed in Table 2.", + "bbox": [ + 509, + 399, + 808, + 413 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The goal of using the hybridization of LSTM and CNN is twofold. First, CNN can drop the non-impactful features and select only the impactful ones (feature engineering). At the same time, it helps to learn the features in a Spatial Hierarchical manner [52]. Second, from our dataset, we got 77 features. As it is unknown which features are impactful from the given features, we applied a 2 1-dimensional CNN layer followed by a max-pooling layer to find the impactful features by learning the 10 nearby features together (kernel size 10). This helps us to create new feature representations where the impactful ones are sustained. Later, we fed these newly derived features directly to 2 LSTM layers. 
This step helps to learn the spatial and temporal features from CNN, resulting in feature representations presented in context and awarded. Finally, we applied 2 Dense layers to regress the feature representations generated from previous CNN and LSTM layers into 15 classes. This process helps us learn the input features more deeply and increase the classification accuracy.", + "bbox": [ + 507, + 414, + 917, + 701 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4. Experimental Design", + "text_level": 1, + "bbox": [ + 509, + 713, + 719, + 728 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To address RQ1, we designed a testbed incorporating two Raspberry Pi 4 Model B units as edge gateways. Each unit is equipped with 8GB of RAM and a 1.5GHz 64-bit quad-core CPU, providing a realistic environment for evaluating the computational impact of ML-based IDS at the edge gateway. Our study evaluates the performance of seven ML-based IDS models: DT, KNN, RF, LSTM, CNN, EIDM, and a hybrid of LSTM and CNN model, selected for their established effectiveness in cybersecurity. We conducted controlled experiments in IoT-edge networks to assess these IDS models, simulating a range of cyber threats(e.g., BENIGN, DDoS, DoS, Brute force attacks, and the Port scan) using Kali Linux [53]. These experiments", + "bbox": [ + 507, + 728, + 915, + 925 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 5 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 950, + 416, + 965 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "Page 6 of 21", + "bbox": [ + 826, + 952, + 912, + 967 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/58dc9bf0185249aca304b6bf51cdc56444b2584dad26b32bd6bb1216bfa9dc43.jpg", + "table_caption": [ + "Table 3 Performance Comparison of ML-based IDS" + ], + "table_footnote": [], + "table_body": "
Metric | DT | KNN | RF | LSTM | LSTM+CNN | CNN
Accuracy | 0.9985 | 0.9967 | 0.9981 | 0.9386 | 0.9575 | 0.9474
Precision | 0.9985 | 0.9966 | 0.9980 | 0.9771 | 0.9877 | 0.9792
Recall | 0.9985 | 0.9967 | 0.9981 | 0.9524 | 0.9645 | 0.9611
F1-Score | 0.9985 | 0.9966 | 0.9980 | 0.9646 | 0.9760 | 0.9701
", + "bbox": [ + 80, + 104, + 763, + 189 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/0ce2d9d2a60fcb651c812ff09c8140b466b45113ba825a0cbc96ce8abb6d1368.jpg", + "image_caption": [ + "Figure 1: IoT-edge testbed topology, illustrating non-SDN and SDN-enabled setups." + ], + "image_footnote": [], + "bbox": [ + 238, + 206, + 764, + 453 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "enabled us to analyze the IDS models' impact on critical performance metrics, specifically CPU usage, CPU load, and energy consumption.", + "bbox": [ + 82, + 519, + 484, + 564 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To address RQ2, we extended our testbed by integrating the edge gateway with the Ryu controller, establishing an SDN-based environment. Ryu, an open-source Python-based SDN controller [54], provides centralized traffic management, enhancing resource allocation and security analysis. We further utilized Mininet [55] to simulate a realistic SDN infrastructure consisting of eighteen hosts, six switches, and a Ryu controller, mirroring real-world network conditions.", + "bbox": [ + 82, + 564, + 484, + 684 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5. Metrics", + "text_level": 1, + "bbox": [ + 84, + 697, + 183, + 711 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "We evaluated CPU usage, CPU load, and energy consumption in our test beds in the context of ML-based IDS during cyber threat scenarios. We employed the ANOVA[56] to ensure an objective assessment of the performance of various ML-based IDS.", + "bbox": [ + 84, + 713, + 484, + 786 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5.1. CPU Load CPU Usage", + "text_level": 1, + "bbox": [ + 84, + 800, + 299, + 815 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "IDS, especially at the edge and SDN environments. CPU usage measures the percentage of the CPU's current capacity, reflecting how much processing power is dedicated to task execution. High CPU usage in an IDS can signal extensive computational demands, potentially impacting the performance of other tasks and system responsiveness, a concern in resource-limited IoT settings. Efficient IDS, especially those utilizing ML techniques, must manage CPU", + "bbox": [ + 84, + 815, + 484, + 935 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "usage carefully to balance detection accuracy with minimal resource use. Excessive CPU usage can slow IDS's real-time network traffic processing, leading to delays or missed attack detection. On the other hand, CPU load indicates the number of processes waiting to be executed, providing an understanding of the CPU's workload. An increase in CPU load might suggest heavy network traffic or numerous attack attempts, highlighting the risk of system overload. Monitoring CPU load allows for early identification of potential bottlenecks, ensuring that IDS operations do not adversely impact system performance. In SDN-enabled IoT edge systems, adept CPU load management is vital to distribute tasks between IDS and other network efficient functions, ensuring optimal resource allocation and system performance. Both CPU usage and load are pivotal metrics for assessing IDS efficacy in environments where resources are constrained, e.g., at the edge gateway[57][58][59].", + "bbox": [ + 512, + 519, + 914, + 774 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.5.2. 
CPU Performance Metrics", + "text_level": 1, + "bbox": [ + 512, + 789, + 754, + 802 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "To assess the computational impact of ML-based IDS, we analyze both CPU load and CPU usage, as these metrics provide complementary insights into system performance. CPU usage is typically expressed as a percentage, indicating the proportion of processing power utilized at a given moment. In contrast, CPU load is presented as a numerical value, representing the average number of active processes waiting for CPU execution over a specific time interval. Moreover,", + "bbox": [ + 512, + 804, + 912, + 921 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 285, + 42, + 710, + 56 + ], + "page_idx": 6 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 84, + 953, + 415, + 965 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "Page 7 of 21", + "bbox": [ + 818, + 953, + 904, + 965 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "while CPU load can be converted into a percentage, it provides a more detailed view of system stress, especially in multi-core environments. In a multi-core processor, a load value of 1.0 on a single-core system indicates full utilization. In contrast, on a quad-core system, a load of 1.0 suggests that only $25\\%$ of the total available processing capacity is used. This distinction is crucial when interpreting our results, as high CPU load does not always imply that the system is at risk of overutilization—it depends on the number of available processing cores and the workload distribution.", + "bbox": [ + 78, + 68, + 487, + 219 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5.3. Energy Consumption", + "text_level": 1, + "bbox": [ + 80, + 230, + 290, + 245 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Energy consumption, often measured in watt-hours or joules, quantifies the amount of energy a device or system expended during its operation. In IoT hardware, where many devices are battery-powered or operate in energy-constrained environments, efficient energy consumption is desirable and necessary. Devices (e.g., sensors, actuators) and even more complex IoT nodes must be designed to perform their tasks while consuming minimal energy, ensuring longevity, and reducing the need for frequent battery replacements or recharges. Moreover, IoT devices integrated with SDN bring a new dimension to the energy conversation; SDN centralizes network control, dynamically optimizing network resources based on real-time demands. Although this centralization offers enhanced flexibility and scalability, it also means that the network's core components must be energy efficient. In IoT systems, where potentially thousands or even millions of devices communicate and exchange data, even minor inefficiencies in energy consumption can accumulate, leading to significant energy drains. Integrating ML-based IDS into the edge gateway emphasizes the need to consider energy metrics critically. ML-based IDS are inherently data-intensive, requiring substantial computational resources to process large datasets for detecting and mitigating security threats. Although these systems offer invaluable security enhancements, their operation can be energy-intensive. 
Therefore, measuring and optimizing the energy consumption of ML-based IDS is crucial to ensure they deliver effective security measures without unduly burdening the system's energy resources. This balance is essential for maintaining the sustainability and efficiency of the edge gateway, where energy efficiency is often a key concern.", + "bbox": [ + 82, + 247, + 485, + 726 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We employed PowerTop [60], a robust tool, to precisely gauge and examine the energy consumption in two separate testbed configurations: the edge gateway integrated with SDN and without SDN. PowerTop's sophisticated monitoring capabilities allowed us to gain insights into these testbeds' energy consumption patterns and processor activity.", + "bbox": [ + 78, + 727, + 487, + 834 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5.4. Designed cyber threats", + "text_level": 1, + "bbox": [ + 80, + 844, + 300, + 860 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For our research, we focused on analyzing DDoS, DoS, brute force attacks, and the port scan. We chose these specific types of attacks since they were already categorized in the employed dataset. These cyber threats are prevalent and", + "bbox": [ + 78, + 861, + 487, + 923 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "pose substantial risks in the field of cybersecurity. Below, a concise summary of each is presented:", + "bbox": [ + 509, + 68, + 914, + 100 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- A Denial-of-Service (DoS): At the edge, DoS attacks are critical cybersecurity threats that disrupt device and service operations by flooding systems with excessive requests and consuming vital resources (e.g., bandwidth, processing power, and memory). This overload prevents the system from serving legitimate users, blocking access to essential operations. The distributed, resource-constrained nature of the edge makes them especially susceptible to DoS attacks. The vulnerability of these devices, coupled with their interconnectedness, means that an attack on a single device can significantly compromise the entire network's functionality and security [61].", + "- A distributed denial-of-service (DDoS): A DDoS attack is a coordinated effort where multiple attackers from different locations flood a specific target, such as a server or network at the edge, with excessive traffic. The goal is to deplete the target's resources, causing severe service disruptions or a complete shutdown. Unlike traditional DoS attacks, which come from a single source, DDoS attacks are distributed across numerous sources, making them harder to defend against. This distributed nature makes DDoS attacks especially dangerous at the edge, where the interconnected and resource-constrained devices can exacerbate the attack's impact, potentially crippling the entire network [62].", + "- Brute Force: A brute force attack involves an attacker systematically attempting to gain unauthorized access to a system by trying every possible combination, such as trying every key until one works. With its many interconnected devices and varying security levels, the edge is especially vulnerable to such attacks. 
Attackers exploit these weaknesses by repeatedly guessing passwords, encryption keys, or access codes, which seriously threatens the integrity and confidentiality of data at the edge gateway[63].", + "- Port Scan:A port scan aims to identify a target system's open ports. By identifying open ports and the services running on them at the edge, attackers can uncover and exploit vulnerabilities, posing a serious threat to the security and integrity of the edge gateway[64]." + ], + "bbox": [ + 537, + 110, + 917, + 788 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.5.5. Analysis method for energy consumption, CPU usage, CPU load", + "text_level": 1, + "bbox": [ + 507, + 799, + 907, + 829 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We used ANOVA to assess our observed results. ANOVA is an indispensable statistical tool for testing the null hypothesis that posits the equivalence of group means. Our study specifically employed one-way ANOVA to examine the impact of a singular independent variable on the evaluated systems. This method relies on several crucial assumptions, including the necessity for the data to exhibit", + "bbox": [ + 507, + 829, + 915, + 936 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 7 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 950, + 418, + 967 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "Page 8 of 21", + "bbox": [ + 816, + 950, + 905, + 967 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "a normal distribution, the variances between groups being equal (homogeneity of variance), and all observations being independent.", + "bbox": [ + 82, + 69, + 484, + 113 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In addition, we conducted 15 separate tests on ML-based IDS to measure CPU load, CPU usage, and energy consumption under various cyber threats. This rigorous approach allowed us to leverage the F statistic, which quantifies the variance ratio between the means of different groups to the variance in the groups. A significant F-statistic, together with a p-value of $\\leq 0.05$ , denotes statistically significant differences between group means, underscoring the efficacy of our testing methodology. By implementing this robust statistical framework, we have thoroughly evaluated the performance of various ML-based IDS models in response to different cyber threats. This analysis has allowed us to identify specific models that demonstrate resilience or efficiency against multiple attacks and require increased computational resources or energy consumption. While CPU load is a key performance metric for IDS evaluation, it is also crucial to consider its impact on IoT device availability and reliability. Excessive CPU consumption by an IDS can degrade the device's primary functions, leading to slow response times or system failures. This is especially critical in real-time applications such as healthcare, industrial automation, and smart home security, where device downtime can have serious consequences. An IDS must enhance security without inadvertently causing an attack such as a DDoS condition due to resource exhaustion. 
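To make the statistical procedure concrete, the following minimal sketch runs a one-way ANOVA over per-model measurement groups (one group of repeated readings per IDS, analogous to the fifteen repetitions used in this study) and reports the F statistic and p-value checked against the 0.05 threshold. SciPy is an assumed dependency here, and the numbers are shortened placeholders rather than measurements from this study.

# Illustrative one-way ANOVA: does mean resource usage differ across IDS models?
from scipy import stats

# Placeholder CPU-load readings per model (the study used fifteen runs per model).
measurements = {
    "KNN":  [61.2, 63.5, 60.8, 62.1, 64.0],
    "DT":   [55.4, 54.9, 56.2, 55.1, 54.7],
    "RF":   [50.3, 51.1, 49.8, 50.6, 50.9],
    "CNN":  [22.4, 21.9, 22.8, 23.1, 22.0],
    "LSTM": [18.7, 19.2, 18.4, 18.9, 19.0],
}

f_stat, p_value = stats.f_oneway(*measurements.values())
print(f"F = {f_stat:.2f}, p = {p_value:.4g}")
if p_value <= 0.05:
    print("Reject H0: mean resource usage differs across IDS models.")
else:
    print("Fail to reject H0 at the 0.05 level.")

The reported F statistic is simply the ratio of the between-group mean square to the within-group mean square, so larger values indicate that the choice of IDS model explains more of the observed variation than run-to-run noise does.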
In addition, through these fifteen iterations of testing, ANOVA has enabled us to validate significant differences in IDS performance metrics (e.g., detection accuracy, false positive rates), CPU load, CPU usage, and energy consumption across diverse scenarios. This methodological approach provides a detailed examination of how different IDS models respond to varied threats, establishing a solid statistical foundation for assessing the efficacy of each model in a controlled environment. By distinguishing between performance differences attributable to the models' inherent capabilities and those due to random variation, our use of ANOVA has proven to be critical. It aids in identifying the most resource-efficient and reliable IDS, thereby guiding the selection process for optimal cybersecurity defenses and enhancing our management and understanding of IDS performance under cyber threat conditions [65] [66].", + "bbox": [ + 82, + 114, + 485, + 731 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.6. TestSuite", + "text_level": 1, + "bbox": [ + 84, + 742, + 196, + 757 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "To initiate the research work presented in this paper and to facilitate the environment for further research and testing, we introduce a versatile test suite designed to experiment with and evaluate ML-based IDS in SDN environments. Unlike conventional experimental testbeds, our test suite is an extensible framework equipped with predefined APIs and a selection of pre-integrated algorithms, facilitating the seamless integration and testing of novel IDS models. Another good contribution to our test suite is that users can execute their experiments on it without Raspberry Pi or any other hardware support. As discussed in the previous", + "bbox": [ + 82, + 759, + 485, + 924 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "paragraph, the test suite is developed following the plug-in architecture feature. This ensures that the user can easily integrate their algorithm into the test suite and test the accuracy, energy consumption, and CPU usage with or without security threats. Users can create their own IoT-SDN network and complexity in the network and generate any number of security breaching attacks. This approach not only simplifies the validation process of IDS models in a realistic network scenario but also encourages the exploration of innovative IDS methodologies by providing a solid foundation of tools and benchmarks. We have made the test suite available with the same configuration discussed in Section 4.4. We integrated the same tools for creating an IoT-SDN network, generating security attacks, and measuring IDS accuracy, energy consumption, CPU usage, etc. Through its design, the test suite aims to advance the development and thorough evaluation of cutting-edge IDS solutions, significantly enhancing network security in the era of SDN.", + "bbox": [ + 512, + 69, + 914, + 353 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5. Experimental Results and Analysis", + "text_level": 1, + "bbox": [ + 512, + 374, + 836, + 390 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "This section discusses our experimental results and findings. After presenting our results, we conducted an in-depth statistical analysis using ANOVA. 
This analysis aims to illuminate the implications and insights that emerge from the experimental results, providing an understanding of the efficacy and nuances of each IDS under study.", + "bbox": [ + 512, + 395, + 914, + 484 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.1. Experimental finding for RQ1 CPU Load:", + "text_level": 1, + "bbox": [ + 512, + 499, + 786, + 527 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We tested ML-based IDSs under various cyberattack scenarios to assess their impact and strain on our testbed. The types of cyberattacks we considered include DDoS, DoS, brute force attacks, and the port scan. Moreover, we conducted the ANOVA focusing on CPU load variations in our testbed. Figure 2 illustrates a comparative analysis of the average CPU load among different ML-based IDS models in the presence of various types of cyberattacks. The DL-based IDS (CNN, LSTM, combined model of LSTM and CNN, and EIDM) consistently maintain lower CPU loads across all attack types, demonstrating their efficiency in resource utilization during inference. In contrast, traditional ML-based IDS such as KNN, DT, and RF exhibit significantly higher CPU loads, especially under brute force and DDoS attacks, with KNN and DT being the most resource-intensive. This is because DL models, such as CNN and LSTM, efficiently handle computations in parallel and are optimized for inference. In contrast, traditional models (e.g., KNN and DT) require more repeated, resource-heavy calculations, such as distance computations in KNN or recursive splitting in DTs, especially under large-scale attacks.", + "bbox": [ + 512, + 529, + 914, + 857 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Statistical Findings:", + "text_level": 1, + "bbox": [ + 512, + 860, + 657, + 873 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "We conducted an ANOVA, and the results presented in Table 4 illuminate significant differences in CPU load among diverse ML-based IDS under DDoS, underscored by F-statistic of 60.40 and a p-value $< 0.05$ . This F-statistic delineates", + "bbox": [ + 512, + 875, + 912, + 934 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 285, + 42, + 710, + 56 + ], + "page_idx": 8 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 84, + 953, + 415, + 965 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "Page 9 of 21", + "bbox": [ + 818, + 953, + 904, + 965 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/2bdb3a6d783df896b48d0f412798353d9e3d3b880f7d4365d283f017702a4c35.jpg", + "image_caption": [ + "Evaluating Machine Learning-driven Intrusion Detection System", + "Figure 2: The Average CPU load of ML-based IDS under cyber threats." + ], + "image_footnote": [], + "bbox": [ + 211, + 68, + 752, + 327 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/a5849353a126f0c5cd9481a994454c5ef073b83de215a78893cca68077303183.jpg", + "table_caption": [ + "Table 4 ANOVA results: CPU Load for ML-based IDS under DDoS." + ], + "table_footnote": [], + "table_body": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 21609.87 | 3601.64 | 60.40 | < 0.05
Within groups | 91 | 5426.49 | 59.63
Total | 97 | 27036.36 | 278.73
", + "bbox": [ + 142, + 424, + 853, + 496 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "the contrast in CPU load variance across ML-based IDSs against the variance in, highlighting a significant influence of IDS selection on CPU load. The remarkably low p-value corroborates this finding, conclusively demonstrating the substantial differences in CPU load among the IDSs. Furthermore, we observed similar p-values $(< 0.05)$ across other attacks, including brute force, DoS, and the port scan, so we do not report them. This reinforces the presence of marked differences in CPU load among diverse ML-based IDS under different cyber threats.", + "bbox": [ + 80, + 520, + 485, + 672 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Finding", + "text_level": 1, + "bbox": [ + 109, + 682, + 164, + 698 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "DL-based IDS, such as CNN, LSTM, and hybrids, perform more efficiently in managing computational demands across diverse types of cyber threats than traditional ML-based IDS, such as KNN, DT, and RF, as they exhibit higher CPU loads at the edge. This pattern suggests that DL-based IDS' intrinsic efficiency is not attack-specific but rooted in their architecture, making them especially suited for real-time applications at edge gateway. These results are expected, as traditional ML-based IDS (e.g., KNN, DT, RF) perform computationally expensive operations during inference, unlike DL-based IDS, which optimizes processing through parallelization and learned feature extraction.", + "bbox": [ + 105, + 709, + 460, + 920 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "CPU Usage:", + "text_level": 1, + "bbox": [ + 536, + 520, + 626, + 535 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Figure 3 compares the average CPU usage of various ML-based IDS models under different cyberattacks. The KNN model consistently exhibits the highest CPU usage across all attack types, indicating its high computational demand, which limits its use in resource-constrained environments. The RF and DT models are also CPU-bound, though they are less intensive than KNN. In contrast, the LSTM model demonstrates the lowest CPU usage, making it the most efficient option for scenarios where minimizing resource consumption is critical. The hybrid of the LSTM and CNN model, along with the CNN and EIDM models, offer a balance between inference accuracy and computational efficiency, making them viable choices for environments with moderate resource availability.", + "bbox": [ + 507, + 537, + 915, + 746 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Statistical Findings:", + "text_level": 1, + "bbox": [ + 509, + 747, + 658, + 761 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 5 presents our ANOVA results. Our results reveal significant differences in CPU load among diverse ML-based IDS under DDoS, as evidenced by a compelling F-statistic of 60.39 and a p-value $< 0.05$ . This F-statistic highlights the variance in CPU load across IDS groups compared to the variance in, underscoring a significant impact of IDS selection on CPU load. The exceedingly small p-value further supports this conclusion. 
Moreover, we observed similar p-values (below 0.05) across various cyber threats, such as brute force, DoS, and the port scan, so we do not report those results.", + "bbox": [ + 507, + 761, + 915, + 926 + ], + "page_idx": 9 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 950, + 415, + 965 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "Page 10 of 21", + "bbox": [ + 816, + 950, + 912, + 965 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/a433945cb1829f61259f6ba9148719bf834e1a76b8a5e34e20d6d88a8c9f6a32.jpg", + "image_caption": [ + "Evaluating Machine Learning-driven Intrusion Detection System", + "Figure 3: The Average CPU usage of ML-based IDS under cyber threats." + ], + "image_footnote": [], + "bbox": [ + 211, + 69, + 754, + 327 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/3352c2d6279b3c18c32a15035d2fad226977bad5a82e283f7e78eb8d92736391.jpg", + "table_caption": [ + "Table 5 ANOVA results: CPU Usage for ML-based IDS under DDoS." + ], + "table_footnote": [], + "table_body": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 21609.86 | 3601.64 | 60.39 | < 0.05
Within groups | 91 | 5426.49 | 59.62
Total | 97 | 27036.36 | 278.73
", + "bbox": [ + 142, + 426, + 853, + 499 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Finding", + "text_level": 1, + "bbox": [ + 109, + 525, + 166, + 539 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Our analysis reveals that traditional ML-based IDS such as KNN, DT, and RF exhibit increased CPU usage under various cyber threats, thus posing challenges for the edge. Also, LSTM and other DL-based IDS exhibit lower CPU demands. This consistent efficiency across various attacks highlights the benefit of adopting DL-based IDS at the edge gateway. The increased CPU usage of KNN, DT, and RF reflects their reliance on instance-based and tree-splitting operations, which require repeated evaluations. In contrast, DL models efficiently process data in structured layers, reducing computational strain.", + "bbox": [ + 107, + 551, + 462, + 733 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Energy consumption:", + "text_level": 1, + "bbox": [ + 105, + 750, + 265, + 764 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Figure 4 shows that the LSTM and DT models are the most energy-efficient across different types of cyberattacks, consistently exhibiting the lowest energy consumption. The CNN model also performs efficiently, with slightly higher energy usage. The LSTM, CNN model hybrid, and EIDM have moderate energy consumption, balancing complexity and efficiency. In contrast, the KNN model has the highest energy consumption across all scenarios, making it less suitable for energy-constrained environments. The RF model falls in between, with moderate energy demands.", + "bbox": [ + 80, + 765, + 485, + 915 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Statistical Findings:", + "text_level": 1, + "bbox": [ + 534, + 522, + 682, + 537 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "We conducted the ANOVA, and the results presented in Table 6 reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by F-statistic of 57.44 and a p-value of $< 0.05$ . This F-statistic delineates the contrast in energy consumption variance across the group of IDSs against the variance in, highlighting a significant influence of IDS selection on energy consumption. The extremely low p-value further supports this conclusion, conclusively demonstrating the substantial differences in energy consumption among the IDSs. In addition, we observed similar p-values ( $< 0.05$ ) for other cyber threats, such as brute force, DoS, and the port scan, so we do not report the results. This observation demonstrates significant differences in energy consumed among various ML-based IDS when faced with differing cyber threats.", + "bbox": [ + 507, + 538, + 915, + 779 + ], + "page_idx": 10 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 950, + 415, + 965 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "Page 11 of 21", + "bbox": [ + 818, + 952, + 912, + 965 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/8790c0ec8c8e4948e20afe7e459c53de3ac9970a2ea4575d17aa9e6a0701d68f.jpg", + "image_caption": [ + "Figure 4: The Average Energy consumption of ML-based IDS under cyber threats." 
+ ], + "image_footnote": [], + "bbox": [ + 211, + 69, + 757, + 327 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/1af4aced79d75a548723f09e225664bbb96f6d6700d00c43968aa178dc0a5c72.jpg", + "table_caption": [ + "Table 6 ANOVA results: energy consumption for ML-based IDS under DDoS." + ], + "table_footnote": [], + "table_body": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 47732.07 | 7955.34 | 57.44 | < 0.05
Within groups | 98 | 13571.72 | 138.48
Total | 104 | 61303.80 | 589.45
", + "bbox": [ + 142, + 424, + 855, + 496 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Finding", + "text_level": 1, + "bbox": [ + 109, + 522, + 168, + 537 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Our analysis concludes a marked discrepancy in energy consumption, with traditional ML-based IDS such as KNN, RF, and DT exhibiting significantly higher energy consumption under cyber threats such as DDoS and brute force, a drawback for energy-constrained at the edge. In contrast, DL-based IDS models, LSTM, CNN, EIDM, and their hybrids excel in energy efficiency, making them the preferable choice for the edge. Traditional ML models' higher energy consumption results from their iterative computations and lack of optimized inference paths, making them less viable for real-time IoT applications where power efficiency is crucial.", + "bbox": [ + 107, + 549, + 463, + 759 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "5.2. Experimental finding for RQ2", + "text_level": 1, + "bbox": [ + 80, + 779, + 361, + 795 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "This section presents our experimental results for IoT-edge devices with SDN integration during real-time cyber threats.", + "bbox": [ + 80, + 796, + 485, + 839 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "CPU Load:", + "text_level": 1, + "bbox": [ + 80, + 839, + 168, + 853 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In Figure 5, we illustrate the CPU load of various ML-based IDS models under different cyberattacks in an SDN-enabled at the edge gateway. The analysis shows that KNN and DT models have the highest CPU load, especially during DDoS and DoS, indicating significant resource demands at", + "bbox": [ + 80, + 854, + 485, + 931 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "the edge. Conversely, the LSTM model demonstrates the lowest CPU load, highlighting its efficiency in resource management. The CNN model also performs efficiently but not as well as LSTM. The LSTM and CNN model hybrid, similar to EIDM, offers balanced performance, making them suitable for scenarios where moderate CPU efficiency is required at the edge.", + "bbox": [ + 507, + 520, + 915, + 625 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Statistical Findings:", + "text_level": 1, + "bbox": [ + 509, + 626, + 658, + 640 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We conducted an ANOVA for the case of the DDoS attack, and the results are presented in Table 7. The results reveal significant differences in CPU load among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 142.57 and a p-value of $< 0.05$ . This F-statistic highlights the variance in CPU load across IDSs compared to the variance in them, indicating a significant impact of IDS selection on CPU load. In addition, consistent p-values $(< 0.05)$ were observed across other cyber threats, including brute force, DoS, and the port scan, and we do not report the result. 
This reinforces the presence of marked differences in CPU load among diverse ML-based IDS when subjected to different cyber threats.", + "bbox": [ + 507, + 641, + 915, + 836 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 11 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 952, + 416, + 965 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "Page 12 of 21", + "bbox": [ + 816, + 952, + 912, + 965 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/a9fbe4523e7a90197894b5f92e5e5c2fa314185d12790a490d4434c61aff811f.jpg", + "image_caption": [ + "Evaluating Machine Learning-driven Intrusion Detection System", + "Figure 5: The Average CPU load of ML-based IDS under cyber threats." + ], + "image_footnote": [], + "bbox": [ + 211, + 69, + 759, + 327 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/a8c052e149131e14ee8d791a6e088133751de0ae0ea0575bd6291fcfd479610c.jpg", + "table_caption": [ + "Table 7 ANOVA results: CPU load for ML-based IDS in SDN under DDoS." + ], + "table_footnote": [], + "table_body": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 1184.21 | 197.36 | 142.57 | < 0.05
Within groups | 91 | 125.97 | 1.38
Total | 97 | 1310.18 | 13.50
", + "bbox": [ + 142, + 419, + 853, + 491 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Finding", + "text_level": 1, + "bbox": [ + 107, + 518, + 166, + 534 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "The findings demonstrate that traditional ML-based IDS, e.g., DT, exhibit elevated loads under DDoS and DoS. In contrast, DL-based IDSs, including EIDM, LSTM, CNN, and their hybrids, demonstrate superior energy efficiency, making them suitable for SDN-enabled at the edge gateway. The integration of SDN helps balance network resource allocation. Yet, traditional ML-based IDS still exhibit higher CPU load due to their design, reinforcing the efficiency advantage of DL-based models in dynamic network environments.", + "bbox": [ + 105, + 544, + 460, + 709 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "CPU Usage:", + "text_level": 1, + "bbox": [ + 105, + 726, + 196, + 740 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Figure 6 shows that CPU usage across various ML-based IDS models in an SDN-enabled edge gateway is fairly consistent across different attack scenarios. Only minor variations are observed, as CNN, LSTM, and hybrid versions demonstrate relatively lower CPU usage, indicating efficient resource management. The DT, KNN, and RF models also show consistent CPU usage across attacks. The EIDM model balances efficiency and performance well.", + "bbox": [ + 80, + 741, + 485, + 862 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Statistical Findings:", + "text_level": 1, + "bbox": [ + 80, + 862, + 230, + 876 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We conducted an ANOVA for the results we got for ML-based IDS in SDN under the DDoS attack. The results presented in Table 8 reveal significant differences in CPU", + "bbox": [ + 80, + 877, + 485, + 923 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "usage among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 5.94 and a p-value of $< 0.05$ . This F-statistic highlights the variance in CPU usage across the group of IDSs compared to the variance in, indicating a significant impact of IDS selection on CPU usage. In addition, we observed a consistently low p-value $(< 0.05)$ for other examined cyber threats (not reported in the paper), including brute force, DoS, and port scan, reinforcing the presence of marked differences in CPU usage among diverse ML-based IDS when subjected to different cyber threats.", + "bbox": [ + 507, + 515, + 917, + 682 + ], + "page_idx": 12 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 950, + 416, + 965 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "Page 13 of 21", + "bbox": [ + 816, + 952, + 912, + 965 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/3438938ed0f1adb9e28527aebc27ca8fcbe55857e4020f1d3c8eb9e1b3a85be0.jpg", + "image_caption": [ + "Evaluating Machine Learning-driven Intrusion Detection System", + "Figure 6: The Average CPU usage of ML-based IDS under cyber threats." + ], + "image_footnote": [], + "bbox": [ + 211, + 69, + 752, + 327 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/251f99d34e8d7e9454bfd3edc048190023c2a6c8c23b168d632e267238bde84f.jpg", + "table_caption": [ + "Table 8 ANOVA results: CPU usage for ML-based IDS in SDN under DDoS." + ], + "table_footnote": [], + "table_body": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 27.97 | 4.66 | 5.94 | < 0.05
Within groups | 91 | 71.32 | 0.78
Total | 97 | 99.30 | 1.02
", + "bbox": [ + 142, + 426, + 855, + 499 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Finding", + "text_level": 1, + "bbox": [ + 109, + 525, + 168, + 539 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "In the context of SDN-enhanced IoT, deploying DL-based IDS with advanced models such as CNN, LSTM, EIDM, and their hybrids demonstrates efficient energy consumption. These models achieve reduced CPU usage against brute force and port scan, benefiting from the centralized resource optimization afforded by SDN. Nonetheless, the complexity of DDoS and DoS presents a significant challenge, necessitating increased computational resources. Although SDN optimizes network operations, IDS models such as KNN and RF remain resource-intensive due to their frequent computational overhead. At the same time, DL-based IDS maintains efficiency through batch processing and learned representations.", + "bbox": [ + 107, + 551, + 463, + 778 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Energy consumption:", + "text_level": 1, + "bbox": [ + 105, + 795, + 265, + 809 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Figure 7 depicts the average energy consumption of ML-based IDS models under different attacks in an SDN environment. The results indicate that traditional ML models consume more energy, especially during port scans, e.g., DT, KNN, and RF. In contrast, the EIDM model consistently shows lower energy consumption across all attack types, highlighting its efficiency. The LSTM and CNN models display moderate energy usage, including their hybrid", + "bbox": [ + 80, + 810, + 485, + 931 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "version. Compared to non-SDN environments, the increased energy consumption in the SDN setup is attributed to the SDN controller's active role in traffic management and threat response, which demands more energy resources.", + "bbox": [ + 507, + 523, + 915, + 583 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Statistical Findings:", + "text_level": 1, + "bbox": [ + 509, + 583, + 658, + 598 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We applied ANOVA on energy consumption data across ML-based IDSs in SDN under DDoS. The results, presented in Table 9, reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by an impressive F-statistic of 18.27 and a p-value of $< 0.05$ . This F-statistic highlights the variance in energy consumption across a group of IDSs compared to the variance in, indicating a significant impact of IDS selection on energy consumption. Moreover, a consistently low p-value ( $< 0.05$ ) was observed across other cyber threats, including brute force, DoS, and port scan, so we do not report the results here. 
This highlights marked differences in CPU usage among diverse ML-based IDS when subjected to examined cyber threats.", + "bbox": [ + 507, + 598, + 915, + 809 + ], + "page_idx": 13 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 952, + 416, + 965 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "Page 14 of 21", + "bbox": [ + 818, + 952, + 912, + 965 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/76798f9d0ff7469f92dc1968687a668bbe01b8942db9c98e301c11f254bb5a84.jpg", + "image_caption": [ + "Evaluating Machine Learning-driven Intrusion Detection System", + "Figure 7: The Average Energy consumption of ML-based IDS under cyber threats." + ], + "image_footnote": [], + "bbox": [ + 211, + 69, + 757, + 327 + ], + "page_idx": 14 + }, + { + "type": "table", + "img_path": "images/663548692e8e7e2de3dfdeda62c7ea5cdf8682492fac6d42168c814f8765d763.jpg", + "table_caption": [ + "Table 9 ANOVA results: Energy consumption for ML-based IDS in SDN under DDoS." + ], + "table_footnote": [], + "table_body": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 1263.26 | 210.54 | 18.27 | < 0.05
Within groups | 91 | 1048.21 | 11.51
Total | 97 | 2311.48 | 23.82
", + "bbox": [ + 142, + 424, + 855, + 496 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Finding", + "text_level": 1, + "bbox": [ + 107, + 520, + 164, + 537 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "The findings accentuate the distinct energy efficiency profiles of ML-based IDSs when exposed to various cyber threat scenarios. During brute force and the port scan, traditional ML-based IDS such as DT, KNN, and RF are observed to have higher energy consumption. This indicates that these models are not energy-efficient under the examined conditions due to their complex computational frameworks. On the other hand, DL-based IDS and the EIDM show markedly superior energy efficiency. The reduced energy footprint of DL-based IDS is especially advantageous in the context of the SDN-enabled at the edge, where low energy consumption is crucial due to device constraints and the need for long-term, autonomous operation. The reduction in energy consumption observed in DL-based IDS when integrated with SDN highlights the benefits of centralized network control and optimized workload distribution, making them a more sustainable choice for IoT security.", + "bbox": [ + 105, + 549, + 460, + 849 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "5.3. Analyzing the Impact of SDN on CPU Usage, Load, and Energy Efficiency in ML-Based IDS", + "text_level": 1, + "bbox": [ + 509, + 519, + 905, + 566 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 8 demonstrates that integrating SDN with ML-based IDS in the edge gateway significantly improves resource efficiency, reducing energy consumption, CPU usage, and CPU load. The most substantial improvement is in CPU usage, where DL-based IDS, e.g., LSTM and CNN, outperform traditional ML models by efficiently handling complex computations through parallel processing. Additionally, SDN integration reduces CPU load by balancing workloads, essential for real-time threat detection in edge gateway. The observed reduction in energy consumption further highlights the approach's suitability for battery-powered edge gateway, confirming its scalability and practicality for real-world applications.", + "bbox": [ + 507, + 568, + 917, + 765 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "6. ML-Based IDS vs. Signature-Based IDS (Snort)", + "text_level": 1, + "bbox": [ + 509, + 786, + 880, + 820 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "This section compares our ML-based IDS models and the signature-based Snort IDS to evaluate the performance improvements achieved by leveraging ML-based IDS over traditional detection systems. This comparison is essential to highlight the advantages of ML-based approaches regarding resource efficiency, scalability, and adaptability, especially in edge gateway.", + "bbox": [ + 507, + 825, + 915, + 931 + ], + "page_idx": 14 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 950, + 416, + 965 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "Page 15 of 21", + "bbox": [ + 816, + 952, + 912, + 965 + ], + "page_idx": 14 + }, + { + "type": "image", + "img_path": "images/3b4e80fa10eb1e2f9e4d8be48c124d93e93944e9df202e0ad45add7539d26c8e.jpg", + "image_caption": [ + "Evaluating Machine Learning-driven Intrusion Detection System", + "Figure 8: Reduction in energy consumption, CPU usage, and CPU load for ML-based IDS models with SDN integration in edge gateway." 
+ ], + "image_footnote": [], + "bbox": [ + 236, + 68, + 759, + 288 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The results presented in Table 10 provide a comparative analysis of our ML-based IDS models against the signature-based Snort IDS discussed in other research.", + "bbox": [ + 82, + 358, + 484, + 402 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Regarding CPU usage, Snort IDS shows high utilization under heavy traffic due to its reliance on predefined rules and signature matching. In contrast, the ML-based IDS models demonstrate better CPU efficiency. While traditional ML models, e.g., DT and KNN, have higher CPU usage because of iterative computations, DL-based IDS, e.g., LSTM, CNN, and a hybrid of LSTM and CNN, EIDM exhibits lower CPU usage. This is primarily due to DL-based IDS's ability to process data in batches and leverage parallel processing for real-time threat detection. For energy consumption, Table 10 shows that Snort IDS consumes more energy, especially in IoT networks requiring multiple containers. However, our ML-based IDS models, especially DL architectures, e.g., LSTM and EIDM, demonstrate superior energy efficiency. These models optimize resource usage and process data efficiently, making them suitable for resource-constrained edge gateway and highlighting their scalability advantages. Finally, in terms of CPU load, Table 10 indicates that earlier versions of Snort IDS suffer from high CPU load on a single core because of their single-threaded architecture. Although newer versions introduce multi-threading, they still encounter processing bottlenecks under heavy traffic. Conversely, the ML-based IDS models distribute the CPU load more effectively across multiple cores. DL-based IDS, especially LSTM and hybrid architectures, achieve the lowest CPU load levels due to their parallel execution capabilities and efficient handling of sequential data.", + "bbox": [ + 82, + 404, + 485, + 809 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "7. Discussion", + "text_level": 1, + "bbox": [ + 84, + 832, + 201, + 847 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Our investigations explored the performance metrics of ML-based IDS with various models, especially in IoT-edge devices with and without SDN integration. Our study was primarily evaluating the impact of these models on CPU load, CPU usage, and energy consumption amidst diverse", + "bbox": [ + 84, + 854, + 484, + 928 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "cyberattack scenarios. The empirical findings revealed significant disparities in resource utilization across different ML-based IDS, shedding light on crucial aspects of their deployment in IoT devices integrated with SDN. The KNN, DT, and RF significantly exhibited higher CPU load, CPU usage, and energy consumption, especially under specific types of cyberattacks. While these models are adept at identifying threats, their resource-intensive nature could pose challenges in the IoT context, where computational resources are often limited. This could lead to diminished performance or instability in environments with constrained resources. Specifically, KNN's higher variance in CPU load and energy consumption, as observed in Tables 4 and 5, stems from its lazy learning approach. Unlike other models, KNN does not build a generalized model during training but instead stores the entire dataset and computes distances at query time. 
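A small, self-contained sketch of why this lazy-learning behaviour is costly at inference time: every single classification recomputes distances to the entire stored training set, so per-query work grows with the number of stored flows, whereas a fitted tree or neural network performs a fixed amount of work per query. The sizes below are hypothetical and chosen only to illustrate the effect.

# Illustration only: per-query cost of brute-force KNN inference.
# Each prediction touches every stored training row (O(n_train * n_features)
# distance work), which is what drives the CPU and energy spikes discussed above.
import numpy as np

rng = np.random.default_rng(0)
n_train, n_features, k = 100_000, 40, 5      # hypothetical stored flow records
X_train = rng.random((n_train, n_features))
y_train = rng.integers(0, 2, n_train)        # benign / attack labels

def knn_predict(query: np.ndarray) -> int:
    dists = np.linalg.norm(X_train - query, axis=1)     # distance to all stored rows
    nearest = np.argpartition(dists, k)[:k]             # indices of the k closest
    return int(np.bincount(y_train[nearest]).argmax())  # majority vote

print(knn_predict(rng.random(n_features)))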
This results in increased processing demands, leading to fluctuations in resource utilization. Such behavior makes KNN less suitable for real-time IDS applications in resource-constrained IoT networks[72] [73]. While CPU load significantly impacts energy consumption, it is not the sole factor. Memory operations, network activity, peripheral devices, and thermal management also contribute to power usage in IoT devices. High data transmission rates and active sensors can increase energy demands, while sustained CPU load may trigger additional energy consumption for cooling mechanisms. Although a strong correlation between CPU load and energy consumption is expected, these factors introduce variations across IDS models. Optimizing IDS efficiency can help balance security and resource constraints in IoT networks. Conversely, the CNN and LSTM models demonstrated greater efficiency in resource utilization. While their architectures are sophisticated and adept at processing complex data structures, they appear to optimize the computational load during inference when employed in IDS. This makes them more suitable for scenarios where resource conservation is critical. However, the complexity of these models introduces its own set of challenges, especially", + "bbox": [ + 512, + 356, + 915, + 929 + ], + "page_idx": 15 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 84, + 953, + 415, + 965 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "Page 16 of 21", + "bbox": [ + 818, + 953, + 912, + 965 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/de6163c30a2befa354ff42e749225c46579ebb595981253974955912f27f9118.jpg", + "table_caption": [ + "Table 17 Comparative Resource Utilization of ML-Based IDS and Snort IDS Based" + ], + "table_footnote": [], + "table_body": "
Metric | Snort IDS | ML-Based IDS (Our Findings)
CPU Usage | - High Traffic Conditions: CPU usage can reach its maximum during initialization with many active rules [67].\n- Multi-Core Systems: Snort 3.0 utilizes a significant portion of CPU resources on a multi-core processor [68] [69]. | - Traditional ML Models (DT, KNN, RF): Tend to exhibit higher CPU usage during real-time cyber threats, especially those requiring intensive computations.\n- DL-Based Models (CNN, LSTM, Hybrid of LSTM and CNN and EIDM): Show lower CPU usage compared to traditional ML models, with LSTM models demonstrating the most efficient utilization due to sequential data processing and parallelization.
Energy Consumption | - IoT Deployment: Deployment of Snort on IoT gateways results in considerable energy consumption [70]. | - Traditional ML-based IDS: Generally consume more energy during inference cycles due to repetitive computations.\n- DL-Based Models: Exhibit better energy efficiency, especially models that combine convolutional and sequential layers, benefiting from optimized processing structures.
CPU Load | - Single-Core Utilization: Older Snort versions (pre-3.0) lead to high load on a single core under heavy traffic [71].\n- Multi-Core Systems: Updated versions distribute the load but still face processing bottlenecks under extensive traffic [71]. | - Traditional ML-based IDS: Often show higher CPU load during complex attack scenarios.\n- DL-Based Models: Maintain a lower CPU load, benefiting from parallel processing capabilities, with hybrid models showing the most balanced load distribution.
", + "bbox": [ + 107, + 104, + 890, + 457 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "in terms of training and ongoing maintenance in the dynamic landscape of IoT devices integrated with SDN.", + "bbox": [ + 80, + 480, + 485, + 509 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The balance between detection efficiency and resource consumption is especially critical at edge gateway, where devices often have limited processing power and energy reserves. This balance is closely tied to several United Nations Sustainable Development Goals (SDGs), especially SDG 9 (Industry, Innovation, and Infrastructure), SDG 11 (Sustainable Cities and Communities), and SDG 13 (Climate Action). Optimizing IDS deployment in smart cities strengthens cybersecurity infrastructure, directly supporting SDG 9 while fostering resilient, sustainable urban environments in line with SDG 11. Furthermore, by prioritizing energy-efficient IDS solutions, this research contributes to SDG 13, promoting responsible resource consumption and mitigating the environmental impact of growing IoT networks [74].", + "bbox": [ + 80, + 511, + 485, + 736 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "To aid IoT developers in selecting appropriate IDS solutions, we provide detailed guidelines in Table 11 and Table 12, outlining the performance trade-offs of seven different ML-based IDS models for IoT devices examined in this paper, both with and without SDN integration. These insights enable developers to make informed decisions, ensuring the optimal balance between security and resource efficiency during application development. We use graphical indicators (smiley faces) instead of numerical values to provide an intuitive, high-level comparison of IDS performance. This visual approach simplifies decision-making for IoT developers, aligning with similar methodologies used in prior work [75]. Moreover, all corresponding numerical values", + "bbox": [ + 80, + 736, + 485, + 931 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "related to CPU usage, CPU load, and energy consumption are presented in the Figures and Tables in Section 5.", + "bbox": [ + 507, + 480, + 914, + 509 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "On the other hand, to the best of our knowledge, only Tekin et al. [12] have explored a similar direction in evaluating the performance of ML-based IDS in IoT systems. However, our study takes a fundamentally different approach, especially in how computational resources are classified and utilized, which plays a critical role in the effectiveness and scalability of IoT systems. While Tekin et al. focus on energy consumption and inference times using Raspberry Pi as an IoT device, our study emphasizes the advantages of processing data at the edge, especially regarding energy efficiency, CPU load, and usage. We show how models such as DT and RF benefit from edge processing, reducing latency and improving responsiveness, especially when combined with SDN, which optimizes network traffic and resource allocation. Our findings underscore the importance of balancing computational tasks across the network using SDN to maintain performance, unlike Tekin et al. [12], who do not explore the impact of edge computing or SDN integration.", + "bbox": [ + 507, + 511, + 915, + 796 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "8. 
Threat and validity", + "text_level": 1, + "bbox": [ + 509, + 818, + 705, + 834 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Empirical research inevitably encounters issues related to the validity of findings. In light of this, the present section seeks to identify and discuss possible threats to our research's validity, per the recommendations of Wohlin et al. [76].", + "bbox": [ + 507, + 839, + 915, + 902 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 16 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 950, + 416, + 965 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "Page 17 of 21", + "bbox": [ + 816, + 950, + 914, + 967 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/57cb6a1aa51f0f83e9611c049e5334b014beab92ec3cd0b15411af446b8dc0ef.jpg", + "table_caption": [ + "Table 11 Guideline for selecting seven ML-based IDS in edge gateway." + ], + "table_footnote": [], + "table_body": "
Metric | DT | KNN | RF | CNN | LSTM | LSTM+CNN | EIDM
CPU load
CPU usage
Energy consumption
", + "bbox": [ + 179, + 104, + 816, + 168 + ], + "page_idx": 17 + }, + { + "type": "table", + "img_path": "images/ebfdbe0d011113feaa528a9d5edb54922ef1df36ccfb3ae94161ccec55847281.jpg", + "table_caption": [ + "Table 12 Guideline for selecting seven ML-based IDS in SDN-edge gateway." + ], + "table_footnote": [], + "table_body": "
Metric | DT | KNN | RF | CNN | LSTM | LSTM+CNN | EIDM
CPU load
CPU usage
Energy consumption
", + "bbox": [ + 179, + 218, + 816, + 281 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The energy consumption and CPU usage in all ML-based IDS lowered during the brute force attack and port scan.", + "bbox": [ + 107, + 283, + 867, + 298 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "8.1. Internal Threats", + "text_level": 1, + "bbox": [ + 82, + 322, + 253, + 337 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "During our empirical study on ML-based IDS in the context of IoT devices with IoT devices integrated with SDN, we recognized the existence of internal obstacles that impact the credibility of our findings. The precision of our performance measures is of utmost importance, namely the measurement of CPU load, CPU usage, and energy consumption in these intricate network settings. The complex characteristics of IoT devices and the adaptable structure of SDN provide significant difficulties in guaranteeing accurate and dependable performance evaluations. To address these concerns, we performed fifteen experiments on our testbeds. To improve the trustworthiness of our results in the context of SDN and IoT, we utilized average values to reduce the impact of network or hardware differences and ambient factors. In addition, the cyber threat simulations were conducted using highly practiced cyber security testing mechanisms in academic research and industries in IoT-edge devices integrated with SDN. This work aims to tackle internal risks associated with the setup and precision of ML-based IDS, improving their usefulness and significance in these fast-advancing technical fields.", + "bbox": [ + 82, + 339, + 485, + 654 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "8.2. External Threats:", + "text_level": 1, + "bbox": [ + 82, + 667, + 263, + 682 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The landscape of network security, especially in IoT-edge devices and IoT-edge devices integrated with SDN realms, is increasingly challenged by external threats. These range from sophisticated cyberattacks such as DoS, DDoS, and brute force attacks to more subtle, yet equally harmful, reconnaissance methods such as a port scan. These threats highlight the urgent need for robust and adaptable IDS solutions. Integrating ML into IDS presents promising advancements in threat detection and mitigation. However, this integration faces challenges due to the complexity of IoT-edge devices, which are marked by numerous interconnected devices, and the dynamic nature of SDN architectures. IDS solutions must be precise in threat detection while also being resource-efficient. Our research evaluates ML-based IDS based on CPU usage, CPU load, and energy consumption, especially under real-time cyber threats. These metrics are", + "bbox": [ + 82, + 683, + 485, + 924 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "vital to ensure that ML-based IDS are effective in protecting networks against external threats and sustainable in their operation. They help maintain a crucial balance between security and performance in the complex ecosystems of IoT devices and IoT devices integrated with SDN. Additionally, to ensure the transparency and reproducibility of our study, we have provided detailed information about the experimental setup and made our testbed and results publicly available for further research [77]. 
By adopting these measures, we have attempted to provide robust validation and increase the inability to reject our findings among practitioners and researchers.", + "bbox": [ + 512, + 322, + 915, + 501 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "9. Conclusion", + "text_level": 1, + "bbox": [ + 512, + 523, + 635, + 538 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "This paper presents a comparative analysis of the ML-based IDS in IoT-edge devices and IoT-edge devices integrated with SDN under different cyberattack scenarios, resulting in comprehension. In IoT systems, conventional ML models (e.g., KNN and DT) often experience increased CPU load and CPU usage, especially when subjected to DoS and DDoS cyber threats. This suggests that these models have limits in resource-limited situations. In contrast, DL-based IDS (e.g., CNN and LSTM) exhibit reduced CPU usage, indicating improved efficiency and compatibility with IoT security. A consistent energy consumption pattern was identified across attack types in both scenarios, encompassing advanced neural networks and conventional methods. The consistent energy efficiency of these models, independent of their computing complexity, highlights their efficacy and long-term viability for use in different network environments. The findings emphasize the significance of choosing ML-based IDS according to their computational efficiency and energy consumption to achieve optimal performance in networks with limited resources. It is imperative to thoroughly evaluate the scalability and robustness of ML-based IDS in future research, especially in more significant and more complex network environments. This assessment will explain their ability to adjust to changing cyber threats. Furthermore, it is crucial to evaluate the influence of new technologies, e.g., 5G and edge computing, on the efficacy", + "bbox": [ + 512, + 544, + 915, + 935 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 17 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 84, + 953, + 415, + 965 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "Page 18 of 21", + "bbox": [ + 818, + 953, + 912, + 965 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "and suitability of ML-based IDS in advanced network infrastructures.", + "bbox": [ + 80, + 68, + 485, + 98 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Future research directions should pivot towards optimizing ML-based IDS for enhanced scalability, real-time processing, and energy consumption. The overarching challenge is to develop effective threat detection models that minimally impact system resources. Furthermore, integrating these models into existing IoT devices and IoT devices integrated with SDN infrastructures presents additional challenges, including ensuring compatibility, scalability, and ease of maintenance.", + "bbox": [ + 80, + 99, + 485, + 233 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "A. 
Conflict of interest", + "text_level": 1, + "bbox": [ + 80, + 254, + 275, + 269 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The authors declare that they have no known conflict of interest or personal relationships that could have appeared to influence the work reported in this paper.", + "bbox": [ + 80, + 276, + 485, + 321 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "B. Acknowledgement", + "text_level": 1, + "bbox": [ + 80, + 343, + 275, + 360 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The authors thank Dr. Karim A. Emara et al. for collaborating to share the EIDM-IDS source code.", + "bbox": [ + 80, + 365, + 485, + 394 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 82, + 417, + 181, + 432 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] D. G. Chowdhry, R. Verma, M. Mathur, The Evolution of Business in the Cyber Age: Digital Transformation, Threats, and Security, CRC Press, 2020.", + "[2] B. Kaur, S. Dadkhah, F. Shoeleh, al., Internet of things (iot) security dataset evolution: Challenges and future directions, Internet of Things (2023) 100780.", + "[3] S. Hadzovic, S. Mrdovic, M. Radonjic, A path towards an internet of things and artificial intelligence regulatory framework, IEEE Communications Magazine (2023).", + "[4] K. L. M. Ang, J. K. P. Seng, E. Ngharamike, Towards crowdsourcing internet of things (crowd-iot): Architectures, security, and applications, Future Internet 14 (2) (2022) 49.", + "[5] M. Ahmid, O. Kazar, A comprehensive review of the internet of things security, Journal of Applied Security Research 18 (3) (2023) 289-305.", + "[6] P. Mall, R. Amin, A. K. Das, M. T. Leung, K.-K. R. Choo, Puf-based authentication and key agreement protocols for IoT, wsns, and smart grids: a comprehensive survey, IEEE Internet of Things Journal 9 (11) (2022) 8205-8228.", + "[7] A. Lakhlan, M. A. Mohammed, K. H. Abdulkareem, M. M. Jaber, J. Nedoma, R. Martinek, P. Zmij, Delay optimal schemes for internet of things applications in heterogeneous edge cloud computing networks, Sensors 22 (16) (2022) 5937.", + "[8] P. Malhotra, Y. Singh, P. Anand, Bangotra, al, Internet of things: Evolution, concerns and security challenges, Sensors 21 (5) (2021) 1809.", + "[9] A. Djenna, S. Harous, D. E. Saidouni, Internet of things meet the internet of threats: New concern cyber security issues of critical cyber infrastructure, Applied Sciences 11 (10) (2021) 4580.", + "[10] M. Almiani, A. AbuGhazleh, A. Al-Rahayfeh, S. Atiewi, A. Razaque, Deep recurrent neural network for IoT intrusion detection system, Simulation Modelling Practice and Theory 101 (2020) 102031.", + "[11] T. Rajmohan, P. H. Nguyen, N. Ferry, Research landscape of patterns and architectures for IoT security: a systematic review, in: 2020 46th Euromicro conference on software engineering and advanced applications (SEAA), IEEE, 2020, pp. 463-470.", + "[12] N. Tekin, A. Acar, A. Aris, A. S. Uluagac, V. C. Gungor, Energy consumption of on-device machine learning models for IoT intrusion detection, Internet of Things 21 (2023) 100670." + ], + "bbox": [ + 82, + 438, + 485, + 915 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[13] A. Hakiri, A. Gokhale, P. Berthou, D. C. Schmidt, T. Gayraud, Software-defined networking: Challenges and research opportunities for future internet, Computer Networks 75 (2014) 453-471.", + "[14] K. H. K. Reddy, A. K. 
Luhach, V. V. Kumar, S. Pratihar, D. Kumar, D. S. Roy, Towards energy efficient smart city services: A software defined resource management scheme for data centers, Sustainable Computing: Informatics and Systems 35 (2022) 100776.", + "[15] A. Montazerolghaem, Software-defined internet of multimedia things: Energy-efficient and load-balanced resource management, IEEE Internet of Things Journal 9 (3) (2021) 2432-2442.", + "[16] J. Liu, H. Shen, H. S. Narman, W. Chung, Z. Lin, A survey of mobile crowdsensing techniques: A critical component for the internet of things, ACM Transactions on Cyber-Physical Systems 2 (3) (2018) 1-26.", + "[17] B. B. Gupta, M. Quamara, An overview of internet of things (iot): Architectural aspects, challenges, and protocols, Concurrency and Computation: Practice and Experience 32 (21) (2020) e4946.", + "[18] A. A. Alsulami, Q. A. Al-Haija, A. Tayeb, Anomaly-based intrusion detection system for IoT networks with improved data engineering (2022).", + "[19] I. Mukherjee, N. K. Sahu, S. K. Sahana, Simulation and modeling for anomaly detection in IoT network using machine learning, International Journal of Wireless Information Networks 30 (2) (2023) 173-189.", + "[20] O. Elnakib, E. Shaaban, M. Mahmoud, K. Emara, Eidm: deep learning model for IoT intrusion detection systems, The Journal of Supercomputing (2023) 1-21.", + "[21] M. Douiba, S. Benkirane, A. Guezzzaz, M. Azrour, An improved anomaly detection model for IoT security using decision tree and gradient boosting, The Journal of Supercomputing 79 (3) (2023) 3392-3411.", + "[22] S. M. Kasongo, Y. Sun, A deep learning method with wrapper-based feature extraction for wireless intrusion detection system, Computers & Security 92 (2020) 101752.", + "[23] A. Verma, V. Ranga, Machine learning-based intrusion detection systems for IoT applications, Wireless Personal Communications 111 (2020) 2287-2310.", + "[24] Y. Otoum, D. Liu, A. Nayak, Dl-ids: a deep learning-based intrusion detection framework for securing IoT, Transactions on Emerging Telecommunications Technologies 33 (3) (2022) e3803.", + "[25] T. Gaber, A. El-Ghamry, A. E. Hassanien, Injection attack detection using machine learning for smart IoT applications, Physical Communication 52 (2022) 101685.", + "[26] U. Sachdeva, P. R. Vamsi, Analysis of deep learning models for anomaly detection in time series IoT sensor data, in: Proceedings of the 2022 Fourteenth International Conference on Contemporary Computing, 2022, pp. 54-62.", + "[27] K. Nimmy, M. Dilraj, S. Sankaran, K. Achuthan, Leveraging power consumption for anomaly detection on IoT devices in smart homes, Journal of Ambient Intelligence and Humanized Computing (2022) 1-12.", + "[28] R. Chaganti, W. Suliman, V. Ravi, A. Dua, Deep learning approach for sdn-enabled intrusion detection system in IoT networks, Information 14 (1) (2023) 41.", + "[29] M. M. Isa, L. Mhamdi, Hybrid deep autoencoder with random forest in native sdn intrusion detection environment, in: ICC 2022-IEEE International Conference on Communications, IEEE, 2022, pp. 1698-1703.", + "[30] P. T. Duy, H. Do Hoang, N. H. Khoa, V.-H. Pham, et al., Fool your enemies: Enable cyber deception and moving target defense for intrusion detection in sdn, in: 2022 21st International Symposium on Communications and Information Technologies (ISCIT), IEEE, 2022, pp. 27-32.", + "[31] M. A. Bouke, A. Abdullah, S. H. ALshatebi, M. T. 
Abdullah, E2ids: An enhanced intelligent intrusion detection system based on decision tree algorithm, Journal of Applied Artificial Intelligence 3 (1) (2022) 1-16." + ], + "bbox": [ + 512, + 70, + 914, + 909 + ], + "page_idx": 18 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 18 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 952, + 415, + 965 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "Page 19 of 21", + "bbox": [ + 818, + 952, + 912, + 965 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[32] L. A. C. Ahakonye, C. I. Nwakanma, J.-M. Lee, D.-S. Kim, Scada intrusion detection scheme exploiting the fusion of modified decision tree and chi-square feature selection, Internet of Things 21 (2023) 100676.", + "[33] M. Hammad, N. Hewahi, W. Elmedany, Mmm-rf: A novel high accuracy multinomial mixture model for network intrusion detection systems, Computers & Security 120 (2022) 102777.", + "[34] K. Albulayhi, Q. Abu Al-Haija, S. A. Alsuhibany, A. A. Jillepalli, M. Ashrafuzzaman, F. T. Sheldon, Iot intrusion detection using machine learning with a novel high performing feature selection method, Applied Sciences 12 (10) (2022) 5015.", + "[35] H. Yang, S. Liang, J. Ni, H. Li, X. S. Shen, Secure and efficient km classification for industrial internet of things, IEEE Internet of Things Journal 7 (11) (2020) 10945-10954.", + "[36] A. D. Afifaturahman, M. Firmansyah, Perbandingan algorithm k-nearest neighbour (knn) dan naive bayes pada intrusion detection system (ids), Innovation in Research of Informatics (INNOVATICs) 3 (1) (2021).", + "[37] F. Z. Belgrana, N. Benamrane, M. A. Hamaida, A. M. Chaabani, A. Taleb-Ahmed, Network intrusion detection system using neural network and condensed nearest neighbors with selection of nsl-kdd influencing features, in: 2020 IEEE International Conference on Internet of Things and Intelligence System (IoTaIS), IEEE, 2021, pp. 23-29.", + "[38] Y. Yan, L. Qi, J. Wang, Y. Lin, L. Chen, A network intrusion detection method based on stacked autoencoder and LSTM, in: ICC 2020-2020 IEEE International Conference on Communications (ICC), IEEE, 2020, pp. 1-6.", + "[39] M. D. Hossain, H. Inoue, H. Ochiai, D. Fall, Y. Kadobayashi, Lstmbased intrusion detection system for in-vehicle can bus communications, IEEE Access 8 (2020) 185489-185502.", + "[40] A. El-Ghamry, A. Darwish, A. E. Hassanien, An optimized cnn-based intrusion detection system for reducing risks in smart farming, Internet of Things 22 (2023) 100709.", + "[41] S. Jamshidi, A. Nikanjam, M. A. Hamdaqa, F. Khomh, Attack detection by using deep learning for cyber-physical system, in: Artificial Intelligence for Cyber-Physical Systems Hardening, Springer, 2022, pp. 155–179.", + "[42] P. Sun, P. Liu, Q. Li, C. Liu, X. Lu, R. Hao, J. Chen, Dl-ids: Extracting features using cnn-lstm hybrid network for intrusion detection system, Security and communication networks 2020 (2020) 1–11.", + "[43] A. Halbouni, T. S. Gunawan, M. H. Habaebi, M. Halbouni, M. Kartiwi, R. Ahmad, Cnn-lstm: hybrid deep neural network for network intrusion detection system, IEEE Access 10 (2022) 99837-99849.", + "[44] D. Stiawan, M. Y. B. Idris, A. M. Bamhdi, R. Budiarto, et al., Cicids-2017 dataset feature analysis with information gain for anomaly detection, IEEE Access 8 (2020) 132911–132921.", + "[45] R. 
Panigrahi, S. Borah, A detailed analysis of cicids2017 dataset for designing intrusion detection systems, International Journal of Engineering & Technology 7 (3.24) (2018) 479-482.", + "[46] A. A. Alsulami, Q. Abu Al-Haija, A. Tayeb, A. Alqahtani, An intrusion detection and classification system for IoT traffic with improved data engineering, Applied Sciences 12 (23) (2022) 12336.", + "[47] L. Yang, A. Moubayed, I. Hamieh, A. Shami, Tree-based intelligent intrusion detection system in internet of vehicles, in: 2019 IEEE global communications conference (GLOBECOM), IEEE, 2019, pp. 1-6.", + "[48] Great Learning, Label encoding in python, [link], accessed: 2024-03-21 (n.d.).", + "[49] Analytics Vidhya, Overcoming class imbalance using smote techniques, [link], accessed: 2024-03-21 (2020).", + "[50] T. N. Sainath, O. Vinyals, A. Senior, H. Sak, Convolutional, long short-term memory, fully connected deep neural networks, in: 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), IEEE, 2015, pp. 4580-4584.", + "[51] L. Muhammad, A. A. Haruna, U. S. Sharif, M. B. Mohammed, Cnn-lstm deep learning based forecasting model for Covid-19 infection cases in nigeria, south africa and botswana, Health and technology 12 (6) (2022) 1259–1276." + ], + "bbox": [ + 82, + 70, + 485, + 923 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[52] L. Alzubaidi, J. Zhang, A. J. Humaidi, A. Al-Dujaili, Y. Duan, O. Al-Shamma, J. Santamaría, M. A. Fadhel, M. Al-Amidie, L. Farhan, Review of deep learning: concepts, cnn architectures, challenges, applications, future directions, Journal of big Data 8 (2021) 1-74.", + "[53] G. Najera-Gutierrez, J. A. Ansari, Web Penetration Testing with Kali Linux: Explore the methods and tools of ethical hacking with Kali Linux, Packt Publishing Ltd, 2018.", + "[54] S. Asadollahi, B. Goswami, M. Sameer, Ryu controller's scalability experiment on software defined networks, in: 2018 IEEE international conference on current trends in advanced computing (ICCTAC), IEEE, 2018, pp. 1-5.", + "[55] K. Kaur, J. Singh, N. S. Ghumman, Mininet as software defined networking testing platform, in: International conference on communication, computing & systems (ICCCS), 2014, pp. 139-42.", + "[56] L. St, S. Wold, et al., Analysis of variance (anova), Chemometrics and intelligent laboratory systems 6 (4) (1989) 259-272.", + "[57] D. Breitenbacher, I. Homoliak, Y. L. Aung, N. O. Tippenhauer, Y. Elovici, Hades-iot: A practical host-based anomaly detection system for iot devices, in: Proceedings of the 2019 ACM Asia conference on computer and communications security, 2019, pp. 479-484.", + "[58] B. Chen, Y. Zhang, G. Iosifidis, M. Liu, Reinforcement learning on computational resource allocation of cloud-based wireless networks, in: 2020 IEEE 6th World Forum on Internet of Things (WF-IoT), IEEE, 2020, pp. 1-6.", + "[59] R. D. Corin, A. Costanzo, F. Callegati, D. Siracusa, Methods and techniques for dynamic deployability of software-defined security services, CoRR (2020).", + "[60] A. van de Ven, Powertop, [link].", + "[61] N. F. Syed, Z. Baig, A. Ibrahim, C. Valli, Denial of service attack detection through machine learning for the IoT, Journal of Information and Telecommunication 4 (4) (2020) 482-503.", + "[62] K. Sonar, H. Upadhyay, A survey: Ddos attack on internet of things, International Journal of Engineering Research and Development 10 (11) (2014) 58-63.", + "[63] M. M. Raikar, S. 
Meena, Ssh brute force attack mitigation in internet of things (iot) network: An edge device security measure, in: 2021 2nd international conference on secure cyber computing and communications (ICSCCC), IEEE, 2021, pp. 72-77.", + "[64] Q. A. Al-Haija, E. Saleh, M. Alnabhan, Detecting port scan attacks using logistic regression, in: 2021 4th International symposium on advanced electrical and communication technologies (ISAECT), IEEE, 2021, pp. 1-5.", + "[65] Z. Campbell, A. Bray, A. Ritz, A. Groce, Differentially private anova testing, in: 2018 1st International Conference on Data Intelligence and Security (ICDIS), IEEE, 2018, pp. 281-285.", + "[66] H. Wei, X. Song, Smooth tests for normality in anova, arXiv preprint arXiv:2110.04849 (2021).", + "[67] E. Frimpong, A performance study of the snort ids (2008).", + "[68] D. Fadhilah, M. I. Marzuki, Performance analysis of ids snort and ids suricata with many-core processor in virtual machines against dos/ddos attacks, in: 2020 2nd International Conference on Broadband Communications, Wireless Sensors and Powering (BCWSP), IEEE, 2020, pp. 157-162.", + "[69] M. Hawedi, C. Talhi, H. Boucheneb, Multi-tenant intrusion detection system for public cloud (mtids), The Journal of Supercomputing 74 (2018) 5199–5230.", + "[70] S. M. Raza, J. Jeong, M. Kim, B. Kang, H. Choo, Empirical performance and energy consumption evaluation of container solutions on resource constrained IoT gateways, Sensors 21 (4) (2021) 1378.", + "[71] W. Park, S. Ahn, Performance comparison and detection analysis in snort and suricata environment, Wireless Personal Communications 94 (2017) 241-252.", + "[72] E. Ozturk Kiyak, B. Ghasemkhani, D. Birant, High-level k-nearest neighbors (hlknn): A supervised machine learning model for classification analysis, Electronics 12 (18) (2023) 3828.", + "[73] E. Altulaihan, M. A. Almaiah, A. Aljughaiman, Anomaly detection ids for detecting dos attacks in IoT networks based on machine learning" + ], + "bbox": [ + 512, + 70, + 912, + 923 + ], + "page_idx": 19 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 19 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 952, + 415, + 965 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "Page 20 of 21", + "bbox": [ + 818, + 952, + 912, + 965 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "algorithms, Sensors 24 (2) (2024) 713.", + "bbox": [ + 112, + 71, + 324, + 83 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "[74] U. Nations, United nations goals: Sustainable development, [link], accessed: September 3, 2024 (2023).", + "[75] F. Khomh, S. A. Abtahizadeh, Understanding the impact of cloud patterns on performance and energy consumption, Journal of Systems and Software 141 (2018) 151-170.", + "[76] C. Wohlin, P. Runeson, M. Höst, M. C. Ohlsson, B. Regnell, A. Wesslén, Experimentation in software engineering, Springer Science & Business Media, 2012.", + "[77] S. Jamshidi, Replication packages, [link]." 
+ ], + "bbox": [ + 82, + 84, + 485, + 196 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "Appendix", + "text_level": 1, + "bbox": [ + 510, + 70, + 601, + 89 + ], + "page_idx": 20 + }, + { + "type": "table", + "img_path": "images/80b826b2da3aaae86deb3b263a1de847b08d3842d3eb3476def4d337e1348324.jpg", + "table_caption": [ + "Table 13: Abbreviations used in this research." + ], + "table_footnote": [], + "table_body": "
Abbreviation | Meaning
AI | Artificial Intelligence
ANOVA | Analysis of Variance
ANN | Artificial Neural Network
BT | Boosting Tree
CPU | Central Processing Unit
DAE | Deep Autoencoder
DDoS | Distributed Denial-of-Service
DL | Deep Learning
DoS | Denial-of-Service
DT | Decision Tree
GPU | Graphics Processing Unit
IDS | Intrusion Detection System
IoT | Internet of Things
KNN | K-Nearest Neighbor
LR | Logistic Regression
LSTM | Long Short-Term Memory
CNN | Convolutional Neural Network
MCU | Microcontroller Unit
MITM | Man-in-the-Middle
ML | Machine Learning
MTD | Moving Target Defense
NB | Naïve Bayes
R2L | Root to Local
RF | Random Forest
RNN | Recurrent Neural Network
SDN | Software-Defined Networking
SDPN | Stacked-Deep Polynomial Network
SMO | Spider Monkey Optimization
SMOTE | Synthetic Minority Oversampling Technique
SNN | Spiking Neural Network
SVM | Support Vector Machine
U2R | User to Root
WFEU | Wrapper Feature Extraction Unit
WSN | Wireless Sensor Network
", + "bbox": [ + 512, + 129, + 951, + 660 + ], + "page_idx": 20 + }, + { + "type": "header", + "text": "Evaluating Machine Learning-driven Intrusion Detection System", + "bbox": [ + 284, + 42, + 712, + 56 + ], + "page_idx": 20 + }, + { + "type": "footer", + "text": "Saeid Jamshidi et al.: Preprint submitted to Elsevier", + "bbox": [ + 82, + 952, + 415, + 965 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "Page 21 of 21", + "bbox": [ + 818, + 952, + 912, + 965 + ], + "page_idx": 20 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_model.json b/data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_model.json new file mode 100644 index 0000000000000000000000000000000000000000..9d45e01f13492b6dce93bc100b72bad09679f039 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_model.json @@ -0,0 +1,4235 @@ +[ + [ + { + "type": "title", + "bbox": [ + 0.082, + 0.064, + 0.884, + 0.113 + ], + "angle": 0, + "content": "Evaluating Machine Learning-Driven Intrusion Detection Systems in IoT: Performance and Energy Consumption" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.125, + 0.647, + 0.143 + ], + "angle": 0, + "content": "Saeid Jamshidi, Kawser Wazed Nafi, Amin Nikanjam, Foutse Khomh" + }, + { + "type": "text", + "bbox": [ + 0.082, + 0.156, + 0.408, + 0.17 + ], + "angle": 0, + "content": "SWAT, Polytechnique, Montréal, H3T 1J4, Quebec, Canada" + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.188, + 0.226, + 0.203 + ], + "angle": 0, + "content": "ARTICLEINFO" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.214, + 0.144, + 0.226 + ], + "angle": 0, + "content": "Keywords:" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.226, + 0.295, + 0.273 + ], + "angle": 0, + "content": "Machine Learning, Intrusion Detection System, Energy Consumption, Software-Defined Networking, SDN-IoT" + }, + { + "type": "title", + "bbox": [ + 0.373, + 0.19, + 0.483, + 0.203 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.37, + 0.214, + 0.916, + 0.309 + ], + "angle": 0, + "content": "In the landscape of network security, the integration of Machine Learning (ML)-based Intrusion Detection System (IDS) represents a significant leap forward, especially in the domain of the Internet of Things (IoT) and Software-Defined Networking (SDN). Such ML-based IDS are crucial for improving security infrastructures, and their importance is increasingly pronounced in IoT systems. However, despite the rapid advancement of ML-based IDS, there remains a gap in understanding their impact on critical performance metrics (e.g., CPU load, energy consumption, and CPU usage) in resource-constrained IoT devices. This becomes especially crucial in scenarios involving real-time cyber threats that challenge IoT devices in a public/private network." + }, + { + "type": "text", + "bbox": [ + 0.37, + 0.31, + 0.916, + 0.465 + ], + "angle": 0, + "content": "To address this gap, this article presents an empirical study that evaluates the impact of state-of-the-art ML-based IDSs on performance metrics such as CPU usage, energy consumption, and CPU load in the absence and presence of real-time cyber threats, with a specific focus on their deployment at the edge of IoT infrastructures. We also incorporate SDN to evaluate the comparative performance of ML-based IDSs with and without SDN. 
To do so, we focus on the impact of both SDN's centralized control and dynamic resource management on the performance metrics of an IoT system. Finally, we analyze our findings using statistical analysis using the Analysis of Variance (ANOVA) analysis. Our findings demonstrate that traditional ML-based IDS, when implemented at the edge gateway with and without SDN architecture, significantly affects performance metrics against cyber threats compared to DL-based ones. Also, we observed substantial increases in energy consumption, CPU usage, and CPU load during real-time cyber threat scenarios at the edge, underscoring the resource-intensive nature of these systems. This research fills the existing knowledge void and delivers essential insights into the operational dynamics of ML-based IDS at edge gateway in IoT systems." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.491, + 0.221, + 0.507 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.512, + 0.486, + 0.693 + ], + "angle": 0, + "content": "The rapid expansion of the Internet of Things (IoT) has ushered in an era where data flows seamlessly across various sectors, driving profound changes in how devices interact [1][2]. This intricate IoT ecosystem, composed of countless devices, sensors, and intelligent nodes, has fundamentally reshaped how we think about device communication, significantly minimizing the need for human involvement [3]. The integration of Software-Defined Networking (SDN) within the IoT landscape represents a significant step forward, creating a unified IoT-SDN framework that offers centralized control, improved network management, and stronger security measures [4][5]." + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.693, + 0.486, + 0.827 + ], + "angle": 0, + "content": "The rapid expansion of IoT, driven by the interconnection of millions of devices via Wireless Sensor Networks (WSNs), presents significant challenges [6]. These challenges stem mainly from these devices' limited memory, power, and battery life, highlighting the need for optimized computing and advanced data analysis techniques [7]. Deploying SDN within this framework aims to overcome these obstacles by offering a streamlined, secure network infrastructure that facilitates effective resource allocation and enhanced threat" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.491, + 0.607, + 0.504 + ], + "angle": 0, + "content": "management." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.504, + 0.916, + 0.609 + ], + "angle": 0, + "content": "Given the widespread security vulnerabilities in IoT networks, such as service disruptions and unauthorized access, the importance of Machine Learning (ML)-based Intrusion Detection Systems (IDS) has grown [8]. ML-based IDS are crucial for protecting network integrity due to their ability to adapt dynamically and effectively identify threats [9][10] [11]." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.609, + 0.916, + 0.789 + ], + "angle": 0, + "content": "However, despite advancements in developing ML-based IDS for IoT, several critical gaps remain, as highlighted by Tekin et al. [12]. While previous research has examined ML-based IDS's performance in controlled, static testbed environments, there is a significant gap in understanding how these systems operate under the dynamic conditions of real-time cyber threats, especially when IoT is integrated with SDN. 
Moreover, while the potential of SDN to significantly enhance resource management in IoT systems is widely acknowledged [13][14][15], there is a lack of empirical evidence on how SDN interacts with ML-based IDS during cyber threats." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.79, + 0.916, + 0.925 + ], + "angle": 0, + "content": "In this study, we set two primary objectives designed to deepen our understanding of network performance metrics in IoT. Firstly, we assess the impact of deploying ML-based IDS at edge gateway, mainly focusing on ML-based IDS performance metrics under real-time cyber threats. Secondly, we explore the impact of integrating SDN with our testbed, again at edge gateway, to evaluate its influence on performance metrics under similar cyber threats. The rationale behind incorporating SDN into our testbed is its" + }, + { + "type": "page_footnote", + "bbox": [ + 0.108, + 0.833, + 0.236, + 0.846 + ], + "angle": 0, + "content": "*Corresponding author" + }, + { + "type": "page_footnote", + "bbox": [ + 0.107, + 0.847, + 0.284, + 0.859 + ], + "angle": 0, + "content": "**Principal corresponding author" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.861, + 0.339, + 0.874 + ], + "angle": 0, + "content": "jamshidi.saeid@polymt1.ca," + }, + { + "type": "page_footnote", + "bbox": [ + 0.085, + 0.874, + 0.319, + 0.885 + ], + "angle": 0, + "content": "kawser.wazed-nafi@polymtl.ca," + }, + { + "type": "page_footnote", + "bbox": [ + 0.084, + 0.887, + 0.48, + 0.909 + ], + "angle": 0, + "content": "amin.nikanjam@polymt1.ca, foutse.khomh@polymt1.ca (S.J.K.W.N.A.N.F. Khomh)" + }, + { + "type": "page_footnote", + "bbox": [ + 0.114, + 0.91, + 0.167, + 0.921 + ], + "angle": 0, + "content": "ORcld(s):" + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.833, + 0.48, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "footer", + "bbox": [ + 0.082, + 0.954, + 0.416, + 0.968 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.827, + 0.953, + 0.914, + 0.968 + ], + "angle": 0, + "content": "Page 1 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.07, + 0.486, + 0.249 + ], + "angle": 0, + "content": "potential to improve resource management in IoT systems significantly [16][17]. We conduct a comparative analysis of the performance of seven state-of-the-art ML-based IDSs in two distinct setups: firstly, at the edge gateway, and secondly, in a similar setup augmented with SDN integration at the edge gateway, all under real-time cyber threats. This analysis is designed to elucidate the impact of SDN on performance metrics and resource management in IoT systems, especially highlighting how SDN integration can optimize the operational efficiency and resilience of IoT networks against the backdrop of evolving cyber threats. To summarize, this paper makes the following contributions:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.26, + 0.488, + 0.381 + ], + "angle": 0, + "content": "- Assessing performance metrics of ML-based IDS in IoT systems under real-time cyber threats: Our investigation revealed the significant impact of seven ML-based IDS on the performance at the edge, specifically measuring CPU usage, CPU load, and energy consumption amidst cyber threats. 
Utilizing ANOVA, we clarify the operational consequences of deploying these sophisticated IDSs on the edge." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.39, + 0.488, + 0.495 + ], + "angle": 0, + "content": "- Evaluating the impact of ML-based IDS at edge integrated with SDN: we evaluated the performance metrics of seven ML-based IDS at the edge gateway system integrated with SDN. Utilizing ANOVA, we clarify the impact of the integrated SDN with IoT on deploying these sophisticated IDS under real-time cyber threats." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.506, + 0.488, + 0.655 + ], + "angle": 0, + "content": "- Proposing a plugin-based ML-based IDS test suite: This test suite comes with a group of available datasets and available ML-based IDSs and allows the users to define their own IoT and SDN applications and test their ML-based IDSs and models in terms of detection accuracy and performance metrics. Researchers can efficiently perform comparative analyses for their algorithms and models with other available algorithms and models. The test suite is publicly available (section 8) for researchers and practitioners to reuse." + }, + { + "type": "list", + "bbox": [ + 0.113, + 0.26, + 0.488, + 0.655 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.666, + 0.486, + 0.785 + ], + "angle": 0, + "content": "The remainder of this paper is organized as follows: Section 2 discusses the review of our research literature. Section 3 discusses the necessary background knowledge. In Section 4, we describe the experimental design, the Research Questions (RQs), and the metrics of the experiments. Section 5 explains our results and findings. Section 8 discusses threats to the validity of our study. Finally, Section 9 concludes the paper and outlines future work." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.808, + 0.238, + 0.824 + ], + "angle": 0, + "content": "2. Related Works" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.83, + 0.486, + 0.935 + ], + "angle": 0, + "content": "Understanding the performance trade-offs of ML-based IDS in IoT, especially in resource-constrained edge gateways, remains an open challenge. While numerous studies, as mentioned in the previous section, have focused on detection accuracy, limited research has analyzed their real-time computational impact. In particular, there is a significant gap in understanding how ML-based IDS operate under real-time" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.07, + 0.915, + 0.13 + ], + "angle": 0, + "content": "cyber threats, especially when integrated with SDN. This section reviews prior works on ML-based IDS in IoT and SDN, examining their strengths and limitations and focusing on ML models and energy consumption concerns." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.143, + 0.741, + 0.157 + ], + "angle": 0, + "content": "2.1. IoT Intrusion Detection" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.159, + 0.915, + 0.337 + ], + "angle": 0, + "content": "Alsulami et al. [18] proposed a new ML model to identify and categorize network activity in IoT systems. Their research aimed to classify network traffic into distinct categories, including normal behavior and various types of attacks (e.g., Mirai, Denial-of-Service (DoS), Scan, and Man-in-the-Middle (MITM)). 
The study involved testing several supervised learning models on the customized IoTID20 dataset, including Spiking Neural Networks (SNNs), DT, Boosting Trees (BT), Support Vector Machines (SVM), and KNN. These models, enhanced through deep feature engineering, effectively identified and classified network anomalies." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.339, + 0.915, + 0.549 + ], + "angle": 0, + "content": "Mukherjee et al. [19] conducted an in-depth investigation into the predictive capabilities of supervised learning models (e.g., Logistic Regression (LR), Naïve Bayes (NB), DT, RF, and Artificial Neural Network (ANN)) for anomaly detection. Their study utilized a dataset comprising 350,000 data points. The research compared these models against established state-of-the-art techniques, including BIRCH clustering and K-Means, and evaluated their performance in different scenarios. This included an analysis using the complete dataset and a separate evaluation after removing binary data points in the 'value' feature. The models demonstrated high precision in both scenarios, underscoring their efficacy in practical anomaly forecasting and enhancing security measures against potential risks." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.55, + 0.915, + 0.714 + ], + "angle": 0, + "content": "Elnakib et al. [20] proposed the Enhanced Intrusion Detection Deep Learning Multi-class Classification Model (EIDM), a sophisticated Deep Learning (DL) model designed to enhance security in the IoT context. This model is adept at accurately categorizing 15 distinct traffic characteristics, encompassing a range of 14 discrete attack types. The performance of EIDM was evaluated against four other contemporary models, focusing on classification accuracy and efficiency. The increased precision of EIDM highlights its promise as a powerful solution for safeguarding IoT networks against a wide range of attacks." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.715, + 0.915, + 0.864 + ], + "angle": 0, + "content": "Douiba et al. [21] proposed an innovative IDS to enhance IoT device security. Their approach utilized gradient boosting and DT in the Catboost framework. The model's performance was rigorously assessed on several datasets, including NSL-KDD, IoT-23, BoT-IoT, and Edge-IIoT, with optimization achieved through GPU acceleration. The IDS distinguished itself with its ability to detect anomalies in real-time and its computing efficiency, demonstrating high accuracy, recall, and precision metrics, around \\(99.9\\%\\) on a record detection and computation time." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.866, + 0.915, + 0.925 + ], + "angle": 0, + "content": "Kasongo et al. [22] presented a research endeavor in which they proposed a Feed-Forward Deep Neural Network (FFDNN) IDS, enhanced by the inclusion of a Wrapper Feature Extraction Unit (WFEU) utilizing the Extra Trees" + }, + { + "type": "footer", + "bbox": [ + 0.085, + 0.954, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.827, + 0.954, + 0.913, + 0.967 + ], + "angle": 0, + "content": "Page 2 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.044, + 0.713, + 0.057 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.069, + 0.486, + 0.205 + ], + "angle": 0, + "content": "algorithm. 
The WFEU-FFDNN was evaluated for its performance on several datasets, including UNSW-NB15 and AWID, and compared with traditional ML methods. The system demonstrated high classification accuracies in binary and multiclass classifications across these datasets, significantly outperforming in scenarios involving the AWID dataset. The enhanced precision of the WFEU-FFDNN model emphasizes its efficacy in real-time anomaly detection and computing efficiency." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.206, + 0.486, + 0.355 + ], + "angle": 0, + "content": "In addition to all of the works stated above, Verma et al. [23] examined ML algorithms in the context of augmenting security measures in the IoT. The researchers compared classifiers using benchmark datasets (e.g., CIDDS-001, UNSW-NB15, and NSL-KDD). This analysis was supported by statistical tests, namely the Friedman and Nemenyi tests. The researchers also evaluated the reaction times on the Raspberry Pi platform, showcasing the adaptability and efficiency of the classifiers in IoT scenarios, hence emphasizing their practical relevance." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.356, + 0.486, + 0.549 + ], + "angle": 0, + "content": "Otoum et al. [24] presented a scholarly investigation in which they propose a DL-powered intrusion detection system (DL-based IDS) to effectively address challenges associated with feature learning and dataset management. The DL-based IDS developed by the researchers integrates the Spider Monkey Optimization(SMO) algorithm with the stacked-deep polynomial network (SDPN) to enhance threat identification. The system can detect various abnormalities, including DoS, User to Root attacks (U2R), probing, and Root-to-local attacks (R2L). The DL-based IDS was evaluated using the NSL-KDD dataset and exhibited outstanding performance metrics, showcasing its efficacy in various aspects of threat detection." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.551, + 0.486, + 0.701 + ], + "angle": 0, + "content": "Gaber et al. [25] highlight securing IoT systems, especially in complex environments ( e.g., smart cities). The authors introduced a feature selection methodology that combines constant removal and recursive feature elimination strategies. They utilized a DT classifier with a subset of 8 characteristics, assessed on the AWID dataset using various ML classifiers. In contrast to existing methods, their approach exhibited exceptional performance, achieving high accuracy, precision, and F1 score rates. These results underscore the potential of their methodology in the domain of IoT-IDS." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.702, + 0.486, + 0.865 + ], + "angle": 0, + "content": "Sachdeva et al. [26] investigate the issue of fortifying cybersecurity in IoT networks to mitigate the impact of distributed denial-of-service (DDoS) attacks. The authors put out an innovative approach for data pre-processing, which involves the integration of ML and DL classifiers. The class imbalances in the BOT-IoT and TON-IoT datasets from UNSW Australia are mitigated using several Synthetic Minority Oversampling Technique (SMOTE) variants. The hybrid methodology employed in this study, which integrates many algorithms, demonstrates the promising prospects for efficient detection of DDoS attacks in IoT networks." 
+ }, + { + "type": "text", + "bbox": [ + 0.085, + 0.882, + 0.485, + 0.925 + ], + "angle": 0, + "content": "The related works discussed above show that the most ML-based IDS developed and re-used by researchers are DT, KNN, RF, LSTM, CNN, and a hybrid model of CNN and" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.07, + 0.914, + 0.13 + ], + "angle": 0, + "content": "LSTM. In addition, EIDM is the most recent work that has overcome the limitations of the previous ML models. That is why we proceed with all these six ML-based IDS to carry out our study in this paper." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.142, + 0.77, + 0.157 + ], + "angle": 0, + "content": "2.2. Energy consumption in IDS" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.158, + 0.915, + 0.457 + ], + "angle": 0, + "content": "Only a tiny amount of research has been done so far to determine the energy consumption in IDS. Among them, Tekin et al. [12] investigated the topic of IDS in the context of the IoT, with a specific focus on the energy consumption aspect in devices with limitations. The authors assessed various ML paradigms in the context of cloud computing, edge computing, and IoT devices. They specifically emphasize the promising capabilities of TinyML for microcontroller units (MCUs). DT algorithm demonstrates in terms of training, inference, and power efficiency. Although Naive Bayes (NB) has superior training speed, it exhibits a minor accuracy trade-off requirements of the KNN algorithm increase proportionally with the quantity of the dataset, hence diminishing its suitability for deployment in IoT systems. Both DT and RF exhibit low power consumption and high accuracy. However, it is essential to consider that RF's longer execution time represents a trade-off. The research findings also elucidate the advantages and constraints of cloud-based ML, underscoring the significance of algorithm choice in practical implementations." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.458, + 0.914, + 0.637 + ], + "angle": 0, + "content": "Nimmy et al. [27] utilize the energy consumption patterns of IoT devices to identify irregularities in smart home environments. They developed a prototype of a smart camera based on Raspberry Pi to gather power traces during regular operations and simulated DDoS attacks. This approach emphasizes the importance of energy consumption as a crucial indicator of aberrant behaviors. The deep feedforward neural network used in their study demon- strates exceptional performance in identifying anomalies, as evidenced by rigorous evaluations of ML models. This indicates its potential to enhance the security of smart homes significantly." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.651, + 0.803, + 0.664 + ], + "angle": 0, + "content": "2.3. IoT Intrusion Detection in SDN" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.666, + 0.915, + 0.904 + ], + "angle": 0, + "content": "Chaganti et al. [28] present a sophisticated IDS for IoT networks. This system leverages SDN and specifically emphasizes the utilization of DL techniques. The research is for its utilization of LSTM networks, a Recurrent Neural Network (RNN) type renowned for its efficacy in handling time series data, which is critical in detecting network threats. The authors' principal contribution is utilizing an LSTM model, which they employ to discern network attacks. To evaluate the efficacy of their approach, the authors conduct a comparative analysis with alternative architectures(e.g., SVM). 
The experimental findings present solid evidence that highlights the improved efficacy of the LSTM model in accurately categorizing various network attacks. The LSTM model demonstrated exceptional accuracy and efficiency in detecting attack patterns, surpassing conventional ML models in precision and recall metrics." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.906, + 0.914, + 0.934 + ], + "angle": 0, + "content": "M. M. Isa et al. [29] present the DAERF model in their research, an innovative IDS for SDN. This model combines" + }, + { + "type": "footer", + "bbox": [ + 0.085, + 0.954, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.828, + 0.954, + 0.913, + 0.967 + ], + "angle": 0, + "content": "Page 3 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.07, + 0.485, + 0.249 + ], + "angle": 0, + "content": "a Deep Autoencoder (DAE) with an RF algorithm, creating a unique approach. The DAE excels in feature extraction and data dimensionality reduction. At the same time, the RF approach, known for using an ensemble of DTs, shows significant accuracy and robustness in classification tasks. The DAERF model was evaluated in a simulated SDN using commonly used datasets, demonstrating a high efficacy level. The integration of DL and ML in the DAERF model represents a novel approach that effectively identifies and categorizes network intrusions, enhancing the security of SDN systems and ensuring their capability to handle real-time applications with scalability and adaptability." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.251, + 0.485, + 0.504 + ], + "angle": 0, + "content": "Phan The Duy et al. [30] presented 'FoolYE,' an innovative IDS designed specifically for SDN systems. The system combines cyber deception techniques with Moving Target Defense (MTD) methodologies. The core of this methodology lies in its ability to create a dynamic and misleading network environment, making it challenging for malicious actors to identify and exploit genuine resources. A key innovation is deep transfer learning-based IDS, which employs advanced DL models (e.g., ResNet50 and DenseNet161), originally designed for image recognition. These models have been adapted using deep transfer learning techniques to analyze network traffic for ML-based IDS, demonstrating the versatility and efficacy of DL in cybersecurity. The study involved experiments in simulated SDN systems, where the performance of the IDS was thoroughly examined, showing its high capability in accurately detecting a wide range of network intrusions." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.506, + 0.485, + 0.729 + ], + "angle": 0, + "content": "Despite advancements in ML-based IDS for IoT, a significant gap remains in understanding their real-time computational impact, especially in energy consumption, CPU load, and CPU usage at the edge gateway. This gap is further compounded by the lack of empirical studies evaluating the effectiveness and efficiency of ML-based IDS in real-world, resource-constrained edge gateway, especially when integrated with SDN during cyber threats. 
To address these shortcomings, our study provides a comprehensive empirical analysis of ML-based IDS, focusing on their performance trade-offs in SDN-enabled and non-SDN edge gateways. Specifically, we assess how different ML-based IDS models impact system resources under real-time cyber threats, offering critical insights into their feasibility for deployment in IoT networks." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.748, + 0.218, + 0.765 + ], + "angle": 0, + "content": "3. Background" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.77, + 0.484, + 0.798 + ], + "angle": 0, + "content": "This section dives into the underlying premise of the research's baselines." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.8, + 0.485, + 0.935 + ], + "angle": 0, + "content": "Decision Tree (DT): In the field of IDS, DT is a key ML method for analyzing network data. They use trees, e.g., models, to break down network features into binary decisions, evaluating network attributes at each node to identify effective splits. This creates a rule-based hierarchy that excels at spotting differences between normal and suspicious network activities. DTs are valued for their clarity and ease of interpretation, playing a vital in improving cybersecurity by identifying unusual or unauthorized actions" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.07, + 0.578, + 0.083 + ], + "angle": 0, + "content": "[31] [32]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.085, + 0.915, + 0.219 + ], + "angle": 0, + "content": "Random Forest (RF): The algorithm is highly valued in IDS for its precision in classifying network data. Utilizing RF, an ML algorithm, it creates a group of DT to assess various network attributes, effectively distinguishing between normal and malicious activities. RF excels in managing large datasets, balancing IDS data disparities, and minimizing overfitting, making IoT and network security crucial. It achieves accurate detection of unusual network behaviors [33] [34]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.221, + 0.915, + 0.565 + ], + "angle": 0, + "content": "K-Nearest Neighbor (KNN): The KNN algorithm is a key IDS tool known for its effective similarity-based classification. It compares network traffic with existing labeled data using distance metrics to classify new instances, with 'k' indicating the number of neighbors considered. This method is crucial for identifying normal versus abnormal network activities, offering a simple yet versatile solution for real-time IDS. KNN excels in both binary and multiclass problems, providing quick, reliable categorizations crucial for responding to threats in dynamic networks [35] [36] [37]. Long short-term memory (LSTM): LSTM networks, a type of recurrent neural network, are highly effective in analyzing sequential data for IDS. Their unique memory cells excel at identifying complex patterns in network traffic, making them adept at spotting advanced threats that traditional methods may miss. LSTMs are especially valuable for maintaining context over data sequences, which is crucial for distinguishing between normal and malicious network activities. Their application in IDS significantly boosts cybersecurity, especially in dynamic and IoT environments, by adapting to new threats and efficiently handling varying data lengths, offering a robust solution to modern cybersecurity challenges [38] [39]." 
+ }, + { + "type": "text", + "bbox": [ + 0.514, + 0.566, + 0.915, + 0.73 + ], + "angle": 0, + "content": "Convolutional Neural Network(CNN): CNNs provide a resilient DL methodology for IDS. CNNs are widely recognized for their ability to independently acquire hierarchical features from network traffic. This is achieved through convolutional, pooling, and fully connected layers, which enable the discernment of spatial patterns in the traffic data. This capacity facilitates the recognition of both well-established and new threats. CNN in IDS is considered crucial in enhancing cybersecurity defenses against a wide range of cyber threats due to their capacity to scale effectively and efficiently handle real-time data [40] [41]." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.732, + 0.915, + 0.925 + ], + "angle": 0, + "content": "Hybrid model of LSTM and CNN: The integration of LSTM and CNN models into IDS significantly boosts network security by combining the spatial analysis capabilities of CNNs with the temporal pattern recognition of LSTMs. This hybrid approach detects complex cyber threats by analyzing network traffic data in both spatial and temporal dimensions. CNNs effectively identify security breaches through local pattern recognition, while LSTMs track the sequence of network events over time, offering a detailed understanding of potential threats. This fusion results in more accurate and efficient detection of sophisticated, multistage attacks, reducing false positives and adapting to new threats, thereby enhancing overall anomaly detection and" + }, + { + "type": "footer", + "bbox": [ + 0.085, + 0.954, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.827, + 0.954, + 0.913, + 0.967 + ], + "angle": 0, + "content": "Page 4 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.07, + 0.486, + 0.099 + ], + "angle": 0, + "content": "maintaining network integrity without excessive alerts [42] [43]." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.1, + 0.486, + 0.309 + ], + "angle": 0, + "content": "EIDM: The EIDM is a cutting-edge IDS approach expertly handling a wide range of network events. Its design combines convolutional and dense layers to tackle the challenges of class diversity and data imbalance. The model begins with a 120-node dense layer, followed by an 80-neuron convolutional layer with a kernel size of 20 to better distinguish between similar network activities. It also features a Maxpooling layer for enhanced feature extraction and a dropout layer to avoid overfitting. EIDM can classify 15 network behaviors through six dense layers, using 'relu' activation and SGD and Adam optimizers for optimal accuracy and efficiency. According to [20], EIDM's unique structure and optimization techniques make it a standout solution for improving network IDS." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.332, + 0.221, + 0.35 + ], + "angle": 0, + "content": "4. Study design" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.354, + 0.486, + 0.443 + ], + "angle": 0, + "content": "This section describes our methodology to evaluate the impact of specific ML-based IDSs using selected performance metrics. 
We first mention our Research Questions (RQs), followed by an explanation of the experimental design and the metrics used to evaluate the impact of the ML-based IDS." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.457, + 0.321, + 0.473 + ], + "angle": 0, + "content": "4.1. Research questions(RQs)" + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.474, + 0.435, + 0.488 + ], + "angle": 0, + "content": "Our research aims to address the following RQs:" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.498, + 0.486, + 0.557 + ], + "angle": 0, + "content": "- RQ1: How do ML-based IDSs impact CPU usage, CPU load, and energy consumption at the edge gateway without SDN during real-time cyber threats?" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.558, + 0.486, + 0.663 + ], + "angle": 0, + "content": "This RQ examines the impact of ML-based IDSs on crucial performance metrics, specifically CPU usage, CPU load, and energy consumption, at edge gateway not integrated with SDN. It focuses on analyzing the performance of seven state-of-the-art ML-based IDSs and their impacts on these key metrics in the face of diverse cyber threats." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.673, + 0.486, + 0.734 + ], + "angle": 0, + "content": "- RQ2: What are the differences in CPU usage, CPU load, and energy consumption impacts of ML-based IDS at the edge gateway with SDN integration during real-time cyber threats?" + }, + { + "type": "text", + "bbox": [ + 0.124, + 0.734, + 0.486, + 0.809 + ], + "angle": 0, + "content": "This RQ explores how ML-based IDSs influence CPU usage, CPU load, and energy consumption at the edge gateway integrated with SDN. It involves analyzing the impacts of various ML-based IDSs on these essential performance metrics under various cyber threats." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.822, + 0.186, + 0.836 + ], + "angle": 0, + "content": "4.2. DataSet" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.839, + 0.486, + 0.928 + ], + "angle": 0, + "content": "In our study, we used the CICIDS2017 data set [44], a highly regarded resource organized by the Canadian Institute for Cybersecurity. This dataset is recognized as one of the gold standards in cybersecurity research, capturing a broad spectrum of benign network activities and the latest cyberattacks [45]. CICIDS2017 is designed to simulate" + }, + { + "type": "table_caption", + "bbox": [ + 0.514, + 0.074, + 0.868, + 0.1 + ], + "angle": 0, + "content": "Table 1 Distribution of labeled IoT-SDN attacks in the dataset" + }, + { + "type": "table", + "bbox": [ + 0.522, + 0.103, + 0.901, + 0.333 + ], + "angle": 0, + "content": "
IoT Attack Labels | No of labeled entries
BENIGN | 2271320
DoS Hulk | 230124
Port Scan | 158804
DDoS | 128025
DoS GoldenEye | 10293
FTP-Patator | 7935
SSH-Patator | 5897
DoS slowloris | 5796
DoS Slowhttptest | 5499
Bot | 1956
Web Attack & Brute Force | 1507
Web Attack & XSS | 652
Infiltration | 36
Web Attack & SQL Injection | 21
Heartbleed | 11
" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.36, + 0.914, + 0.435 + ], + "angle": 0, + "content": "real-world network environments, making it an essential resource for researchers to test and validate advanced IDS thoroughly. The breadth and diversity of the asset highlight its importance, making it necessary for those aiming to strengthen network security paradigms." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.448, + 0.704, + 0.462 + ], + "angle": 0, + "content": "4.3. The ML-based IDS" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.464, + 0.915, + 0.718 + ], + "angle": 0, + "content": "Numerous ML-based IDS have been developed by researchers [12] [22] [25] [46]. However, we had a significant challenge in reviewing these publications and selecting some for our study. Most did not make their solutions' applications or source code publicly available. This lack of transparency hinders the ability to experiment with these works in real IoT devices. This omission complicates, and may even prevent, the objective comparison of the proposed solutions. Consequently, to initiate our study, it became necessary to independently implement all ML-based IDS that have been previously utilized, except the ML-based IDS proposed by [20], which shared their code ML-based IDS available to researchers. In this section, we explore the implementation process of seven ML-based IDSs that we have developed: DT, KNN, RF, LSTM, CNN, and a hybrid model of LSTM and CNN. Table 3 presents a comparative analysis of the performance metrics of ML-based IDS." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.732, + 0.664, + 0.746 + ], + "angle": 0, + "content": "4.3.1. DT, KNN, RF" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.748, + 0.915, + 0.926 + ], + "angle": 0, + "content": "We have developed and deployed DT-based IDS, RF-based IDS, and KNN-based IDS [47], each specifically designed to improve security policy. The foundation of these models is a preprocessing technique applied to the selected CICIDS 2017 dataset. The dataset features various simulated cyber-attack scenarios alongside standard traffic data. It encompasses multiple numerical attributes, including but not limited to packet sizes, flow durations, and bytes per flow, which are critical for analyzing network behavior and detecting anomalies. We applied min-max normalization as our initial preprocessing step to ensure uniformity across these diverse numerical attributes and" + }, + { + "type": "footer", + "bbox": [ + 0.085, + 0.954, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.828, + 0.954, + 0.913, + 0.967 + ], + "angle": 0, + "content": "Page 5 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.074, + 0.761, + 0.101 + ], + "angle": 0, + "content": "Table 6 Comparison of structure and accuracy of different Neural Network models in IDS for IoT-SDN network" + }, + { + "type": "table", + "bbox": [ + 0.178, + 0.105, + 0.819, + 0.376 + ], + "angle": 0, + "content": "
Dataset | CICIDS2017 | CICIDS2017 | CICIDS2017 | CICIDS2017
Categories | 15 | 15 | 15 | 15
Model | LSTM | LSTM+CNN | CNN | EIDM
Layers | 10 | 11 | 8 | 12
Parameters5638612795349748735
Structure detailsDense (64)Dense (64)Dense (120)
Dense (128)Conv1D (64, 10)Conv1D (16,30)Conv1D(80,20)
LSTM (128)Conv1D (64, 10)Conv1D (16,30)MaxPooling1D (2)
LSTM (256)MaxPooling1D (2)MaxPooling1D (2)Dense (120)
Dense (128)LSTM (128)Flatten()Dense (100)
Dense (48)LSTM (64)Dense (32)Dense (80)
Dense (15)Dense (64)Dense (15)Dense (60)
Dense (15)Dense (60)
Dense (40)
Dense (15)
Training Accuracy (%) | 97.72% | 98.77% | 97.92% | 99.57%
Testing Accuracy (%) | 93.86% | 95.75% | 94.74% | 99.56%
" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.4, + 0.486, + 0.642 + ], + "angle": 0, + "content": "mitigate scale discrepancies. Missing values were imputed to preserve the integrity of the data. The LabelEncoder[48] was utilized to convert labels into a format suitable for ML techniques. An essential aspect of our methodology is to divide the selected dataset into training and testing subsets. For the first RQ, we adopted \\(80\\%\\) training and \\(20\\%\\) testing, aligning with standard practices in ML model development. This adjustment was made to accommodate the different requirements of each research phase. As shown in Table 1, the dataset has five classes (Benign, DDoS, DoS, Brute force, and Port scan) with significantly more entries than the remaining ten classes, which contain fewer samples. SMOTE [49] with auto-sampling was employed to address the class imbalance issue in the dataset. This technique effectively augmented the representation of underrepresented classes, leading to a more balanced dataset for training purposes." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.653, + 0.174, + 0.667 + ], + "angle": 0, + "content": "4.3.2. CNN" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.668, + 0.488, + 0.729 + ], + "angle": 0, + "content": "In our research, we deployed a CNN-based IDS tailored for our experimental testbed. The configuration details of the CNN model, including its layers, parameters, and architecture specifics, are outlined in Table 2." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.74, + 0.185, + 0.755 + ], + "angle": 0, + "content": "4.3.3. LSTM" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.756, + 0.488, + 0.817 + ], + "angle": 0, + "content": "In our investigation, we implemented an LSTM-based IDS specifically for our testbeds. The detailed architecture and parameters of the LSTM model, crucial for its operation in our IDS, are thoroughly presented in Table 2." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.828, + 0.379, + 0.844 + ], + "angle": 0, + "content": "4.3.4. Hybrid model of LSTM and CNN" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.844, + 0.488, + 0.935 + ], + "angle": 0, + "content": "In our exploration, we implemented a hybrid LSTM and CNN architectures model to create an advanced IDS tailored to our experimental setup. This architecture has already been tested in various scenarios [50][51][43]. The intricate configuration of this hybrid LSTM and CNN model, which leverages the strengths of both LSTM and CNN to enhance" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.4, + 0.81, + 0.414 + ], + "angle": 0, + "content": "detection capabilities, is detailed in Table 2." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.415, + 0.918, + 0.702 + ], + "angle": 0, + "content": "The goal of using the hybridization of LSTM and CNN is twofold. First, CNN can drop the non-impactful features and select only the impactful ones (feature engineering). At the same time, it helps to learn the features in a Spatial Hierarchical manner [52]. Second, from our dataset, we got 77 features. As it is unknown which features are impactful from the given features, we applied a 2 1-dimensional CNN layer followed by a max-pooling layer to find the impactful features by learning the 10 nearby features together (kernel size 10). This helps us to create new feature representations where the impactful ones are sustained. Later, we fed these newly derived features directly to 2 LSTM layers. 
This step helps to learn the spatial and temporal features from CNN, resulting in feature representations presented in context and awarded. Finally, we applied 2 Dense layers to regress the feature representations generated from previous CNN and LSTM layers into 15 classes. This process helps us learn the input features more deeply and increase the classification accuracy." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.714, + 0.72, + 0.729 + ], + "angle": 0, + "content": "4.4. Experimental Design" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.73, + 0.917, + 0.926 + ], + "angle": 0, + "content": "To address RQ1, we designed a testbed incorporating two Raspberry Pi 4 Model B units as edge gateways. Each unit is equipped with 8GB of RAM and a 1.5GHz 64-bit quad-core CPU, providing a realistic environment for evaluating the computational impact of ML-based IDS at the edge gateway. Our study evaluates the performance of seven ML-based IDS models: DT, KNN, RF, LSTM, CNN, EIDM, and a hybrid of LSTM and CNN model, selected for their established effectiveness in cybersecurity. We conducted controlled experiments in IoT-edge networks to assess these IDS models, simulating a range of cyber threats(e.g., BENIGN, DDoS, DoS, Brute force attacks, and the Port scan) using Kali Linux [53]. These experiments" + }, + { + "type": "footer", + "bbox": [ + 0.083, + 0.952, + 0.417, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.827, + 0.953, + 0.914, + 0.968 + ], + "angle": 0, + "content": "Page 6 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.286, + 0.044, + 0.712, + 0.057 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.073, + 0.375, + 0.1 + ], + "angle": 0, + "content": "Table 3 Performance Comparison of ML-based IDS" + }, + { + "type": "table", + "bbox": [ + 0.081, + 0.105, + 0.764, + 0.19 + ], + "angle": 0, + "content": "
Metric | DT | KNN | RF | LSTM | LSTM+CNN | CNN
Accuracy | 0.9985 | 0.9967 | 0.9981 | 0.9386 | 0.9575 | 0.9474
Precision | 0.9985 | 0.9966 | 0.9980 | 0.9771 | 0.9877 | 0.9792
Recall | 0.9985 | 0.9967 | 0.9981 | 0.9524 | 0.9645 | 0.9611
F1-Score | 0.9985 | 0.9966 | 0.9980 | 0.9646 | 0.9760 | 0.9701
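Table 3 above summarizes the classification quality of the models described in Section 4.3. To make that pipeline concrete, the following is a minimal sketch of the preprocessing steps (imputation, scaling, LabelEncoder, the 80%/20% split, SMOTE) together with a hybrid CNN-LSTM classifier of the shape described in Section 4.3.4 (two 1-D convolutional layers with kernel size 10, max pooling, two LSTM layers, two dense layers, 15 output classes). The input file name, the choice of MinMaxScaler, and all layer widths, optimizer, and training settings are assumptions for illustration only; the actual configuration is given in Table 2.

```python
# Minimal sketch, not the paper's implementation: preprocessing + hybrid CNN-LSTM IDS.
# Assumes a DataFrame `df` with 77 numeric feature columns and a "Label" column.
import pandas as pd
from sklearn.preprocessing import LabelEncoder, MinMaxScaler
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
from tensorflow.keras import layers, models

df = pd.read_csv("cicids_subset.csv")                      # hypothetical input file
df = df.fillna(df.median(numeric_only=True))               # impute missing values (method assumed)
X = df.drop(columns=["Label"]).to_numpy(dtype="float32")
y = LabelEncoder().fit_transform(df["Label"])              # string labels -> integers

X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.20,
                                          stratify=y, random_state=42)
scaler = MinMaxScaler()                                    # mitigate scale discrepancies
X_tr = scaler.fit_transform(X_tr)
X_te = scaler.transform(X_te)
X_tr, y_tr = SMOTE(sampling_strategy="auto").fit_resample(X_tr, y_tr)

# Reshape to (samples, 77, 1) so Conv1D can slide a kernel over the 77 features.
X_tr = X_tr.reshape(-1, 77, 1)
X_te = X_te.reshape(-1, 77, 1)

model = models.Sequential([
    layers.Conv1D(64, kernel_size=10, activation="relu", input_shape=(77, 1)),
    layers.Conv1D(64, kernel_size=10, activation="relu"),
    layers.MaxPooling1D(pool_size=2),
    layers.LSTM(64, return_sequences=True),
    layers.LSTM(64),
    layers.Dense(64, activation="relu"),
    layers.Dense(15, activation="softmax"),                # 15 traffic classes
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy",
              metrics=["accuracy"])
model.fit(X_tr, y_tr, validation_data=(X_te, y_te), epochs=10, batch_size=256)
```

Accuracy, precision, recall, and F1 scores comparable to Table 3 can then be computed on the held-out 20% with scikit-learn's classification_report.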
" + }, + { + "type": "image", + "bbox": [ + 0.239, + 0.208, + 0.765, + 0.455 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.219, + 0.473, + 0.779, + 0.488 + ], + "angle": 0, + "content": "Figure 1: IoT-edge testbed topology, illustrating non-SDN and SDN-enabled setups." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.52, + 0.485, + 0.565 + ], + "angle": 0, + "content": "enabled us to analyze the IDS models' impact on critical performance metrics, specifically CPU usage, CPU load, and energy consumption." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.566, + 0.485, + 0.685 + ], + "angle": 0, + "content": "To address RQ2, we extended our testbed by integrating the edge gateway with the Ryu controller, establishing an SDN-based environment. Ryu, an open-source Python-based SDN controller [54], provides centralized traffic management, enhancing resource allocation and security analysis. We further utilized Mininet [55] to simulate a realistic SDN infrastructure consisting of eighteen hosts, six switches, and a Ryu controller, mirroring real-world network conditions." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.698, + 0.184, + 0.712 + ], + "angle": 0, + "content": "4.5. Metrics" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.714, + 0.485, + 0.787 + ], + "angle": 0, + "content": "We evaluated CPU usage, CPU load, and energy consumption in our test beds in the context of ML-based IDS during cyber threat scenarios. We employed the ANOVA[56] to ensure an objective assessment of the performance of various ML-based IDS." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.802, + 0.3, + 0.816 + ], + "angle": 0, + "content": "4.5.1. CPU Load CPU Usage" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.817, + 0.485, + 0.936 + ], + "angle": 0, + "content": "IDS, especially at the edge and SDN environments. CPU usage measures the percentage of the CPU's current capacity, reflecting how much processing power is dedicated to task execution. High CPU usage in an IDS can signal extensive computational demands, potentially impacting the performance of other tasks and system responsiveness, a concern in resource-limited IoT settings. Efficient IDS, especially those utilizing ML techniques, must manage CPU" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.52, + 0.915, + 0.775 + ], + "angle": 0, + "content": "usage carefully to balance detection accuracy with minimal resource use. Excessive CPU usage can slow IDS's real-time network traffic processing, leading to delays or missed attack detection. On the other hand, CPU load indicates the number of processes waiting to be executed, providing an understanding of the CPU's workload. An increase in CPU load might suggest heavy network traffic or numerous attack attempts, highlighting the risk of system overload. Monitoring CPU load allows for early identification of potential bottlenecks, ensuring that IDS operations do not adversely impact system performance. In SDN-enabled IoT edge systems, adept CPU load management is vital to distribute tasks between IDS and other network efficient functions, ensuring optimal resource allocation and system performance. Both CPU usage and load are pivotal metrics for assessing IDS efficacy in environments where resources are constrained, e.g., at the edge gateway[57][58][59]." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.79, + 0.756, + 0.803 + ], + "angle": 0, + "content": "4.5.2. 
CPU Performance Metrics" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.805, + 0.914, + 0.923 + ], + "angle": 0, + "content": "To assess the computational impact of ML-based IDS, we analyze both CPU load and CPU usage, as these metrics provide complementary insights into system performance. CPU usage is typically expressed as a percentage, indicating the proportion of processing power utilized at a given moment. In contrast, CPU load is presented as a numerical value, representing the average number of active processes waiting for CPU execution over a specific time interval. Moreover," + }, + { + "type": "footer", + "bbox": [ + 0.085, + 0.954, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.819, + 0.954, + 0.905, + 0.967 + ], + "angle": 0, + "content": "Page 7 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.069, + 0.488, + 0.22 + ], + "angle": 0, + "content": "while CPU load can be converted into a percentage, it provides a more detailed view of system stress, especially in multi-core environments. In a multi-core processor, a load value of 1.0 on a single-core system indicates full utilization. In contrast, on a quad-core system, a load of 1.0 suggests that only \\(25\\%\\) of the total available processing capacity is used. This distinction is crucial when interpreting our results, as high CPU load does not always imply that the system is at risk of overutilization—it depends on the number of available processing cores and the workload distribution." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.232, + 0.292, + 0.247 + ], + "angle": 0, + "content": "4.5.3. Energy Consumption" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.248, + 0.486, + 0.727 + ], + "angle": 0, + "content": "Energy consumption, often measured in watt-hours or joules, quantifies the amount of energy a device or system expended during its operation. In IoT hardware, where many devices are battery-powered or operate in energy-constrained environments, efficient energy consumption is desirable and necessary. Devices (e.g., sensors, actuators) and even more complex IoT nodes must be designed to perform their tasks while consuming minimal energy, ensuring longevity, and reducing the need for frequent battery replacements or recharges. Moreover, IoT devices integrated with SDN bring a new dimension to the energy conversation; SDN centralizes network control, dynamically optimizing network resources based on real-time demands. Although this centralization offers enhanced flexibility and scalability, it also means that the network's core components must be energy efficient. In IoT systems, where potentially thousands or even millions of devices communicate and exchange data, even minor inefficiencies in energy consumption can accumulate, leading to significant energy drains. Integrating ML-based IDS into the edge gateway emphasizes the need to consider energy metrics critically. ML-based IDS are inherently data-intensive, requiring substantial computational resources to process large datasets for detecting and mitigating security threats. Although these systems offer invaluable security enhancements, their operation can be energy-intensive. 
Therefore, measuring and optimizing the energy consumption of ML-based IDS is crucial to ensure they deliver effective security measures without unduly burdening the system's energy resources. This balance is essential for maintaining the sustainability and efficiency of the edge gateway, where energy efficiency is often a key concern." + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.728, + 0.488, + 0.835 + ], + "angle": 0, + "content": "We employed PowerTop [60], a robust tool, to precisely gauge and examine the energy consumption in two separate testbed configurations: the edge gateway integrated with SDN and without SDN. PowerTop's sophisticated monitoring capabilities allowed us to gain insights into these testbeds' energy consumption patterns and processor activity." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.846, + 0.301, + 0.861 + ], + "angle": 0, + "content": "4.5.4. Designed cyber threats" + }, + { + "type": "text", + "bbox": [ + 0.08, + 0.862, + 0.488, + 0.924 + ], + "angle": 0, + "content": "For our research, we focused on analyzing DDoS, DoS, brute force attacks, and the port scan. We chose these specific types of attacks since they were already categorized in the employed dataset. These cyber threats are prevalent and" + }, + { + "type": "text", + "bbox": [ + 0.51, + 0.069, + 0.915, + 0.101 + ], + "angle": 0, + "content": "pose substantial risks in the field of cybersecurity. Below, a concise summary of each is presented:" + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.111, + 0.917, + 0.309 + ], + "angle": 0, + "content": "- A Denial-of-Service (DoS): At the edge, DoS attacks are critical cybersecurity threats that disrupt device and service operations by flooding systems with excessive requests and consuming vital resources (e.g., bandwidth, processing power, and memory). This overload prevents the system from serving legitimate users, blocking access to essential operations. The distributed, resource-constrained nature of the edge makes them especially susceptible to DoS attacks. The vulnerability of these devices, coupled with their interconnectedness, means that an attack on a single device can significantly compromise the entire network's functionality and security [61]." + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.317, + 0.917, + 0.528 + ], + "angle": 0, + "content": "- A distributed denial-of-service (DDoS): A DDoS attack is a coordinated effort where multiple attackers from different locations flood a specific target, such as a server or network at the edge, with excessive traffic. The goal is to deplete the target's resources, causing severe service disruptions or a complete shutdown. Unlike traditional DoS attacks, which come from a single source, DDoS attacks are distributed across numerous sources, making them harder to defend against. This distributed nature makes DDoS attacks especially dangerous at the edge, where the interconnected and resource-constrained devices can exacerbate the attack's impact, potentially crippling the entire network [62]." + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.538, + 0.918, + 0.689 + ], + "angle": 0, + "content": "- Brute Force: A brute force attack involves an attacker systematically attempting to gain unauthorized access to a system by trying every possible combination, such as trying every key until one works. With its many interconnected devices and varying security levels, the edge is especially vulnerable to such attacks. 
Attackers exploit these weaknesses by repeatedly guessing passwords, encryption keys, or access codes, which seriously threatens the integrity and confidentiality of data at the edge gateway[63]." + }, + { + "type": "text", + "bbox": [ + 0.538, + 0.698, + 0.918, + 0.789 + ], + "angle": 0, + "content": "- Port Scan:A port scan aims to identify a target system's open ports. By identifying open ports and the services running on them at the edge, attackers can uncover and exploit vulnerabilities, posing a serious threat to the security and integrity of the edge gateway[64]." + }, + { + "type": "list", + "bbox": [ + 0.538, + 0.111, + 0.918, + 0.789 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.8, + 0.908, + 0.83 + ], + "angle": 0, + "content": "4.5.5. Analysis method for energy consumption, CPU usage, CPU load" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.831, + 0.917, + 0.937 + ], + "angle": 0, + "content": "We used ANOVA to assess our observed results. ANOVA is an indispensable statistical tool for testing the null hypothesis that posits the equivalence of group means. Our study specifically employed one-way ANOVA to examine the impact of a singular independent variable on the evaluated systems. This method relies on several crucial assumptions, including the necessity for the data to exhibit" + }, + { + "type": "footer", + "bbox": [ + 0.083, + 0.952, + 0.419, + 0.968 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.818, + 0.952, + 0.907, + 0.968 + ], + "angle": 0, + "content": "Page 8 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.286, + 0.044, + 0.712, + 0.057 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.07, + 0.485, + 0.114 + ], + "angle": 0, + "content": "a normal distribution, the variances between groups being equal (homogeneity of variance), and all observations being independent." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.115, + 0.486, + 0.732 + ], + "angle": 0, + "content": "In addition, we conducted 15 separate tests on ML-based IDS to measure CPU load, CPU usage, and energy consumption under various cyber threats. This rigorous approach allowed us to leverage the F statistic, which quantifies the variance ratio between the means of different groups to the variance in the groups. A significant F-statistic, together with a p-value of \\(\\leq 0.05\\), denotes statistically significant differences between group means, underscoring the efficacy of our testing methodology. By implementing this robust statistical framework, we have thoroughly evaluated the performance of various ML-based IDS models in response to different cyber threats. This analysis has allowed us to identify specific models that demonstrate resilience or efficiency against multiple attacks and require increased computational resources or energy consumption. While CPU load is a key performance metric for IDS evaluation, it is also crucial to consider its impact on IoT device availability and reliability. Excessive CPU consumption by an IDS can degrade the device's primary functions, leading to slow response times or system failures. This is especially critical in real-time applications such as healthcare, industrial automation, and smart home security, where device downtime can have serious consequences. 
An IDS must enhance security without inadvertently causing an attack such as a DDoS condition due to resource exhaustion. In addition, through these fifteen iterations of testing, ANOVA has enabled us to validate significant differences in IDS performance metrics (e.g., detection accuracy, false positive rates), CPU load, CPU usage, and energy consumption across diverse scenarios. This methodological approach provides a detailed examination of how different IDS models respond to varied threats, establishing a solid statistical foundation for assessing the efficacy of each model in a controlled environment. By distinguishing between performance differences attributable to the models' inherent capabilities and those due to random variation, our use of ANOVA has proven to be critical. It aids in identifying the most resource-efficient and reliable IDS, thereby guiding the selection process for optimal cybersecurity defenses and enhancing our management and understanding of IDS performance under cyber threat conditions [65] [66]." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.744, + 0.197, + 0.758 + ], + "angle": 0, + "content": "4.6. TestSuite" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.76, + 0.486, + 0.925 + ], + "angle": 0, + "content": "To initiate the research work presented in this paper and to facilitate the environment for further research and testing, we introduce a versatile test suite designed to experiment with and evaluate ML-based IDS in SDN environments. Unlike conventional experimental testbeds, our test suite is an extensible framework equipped with predefined APIs and a selection of pre-integrated algorithms, facilitating the seamless integration and testing of novel IDS models. Another good contribution to our test suite is that users can execute their experiments on it without Raspberry Pi or any other hardware support. As discussed in the previous" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.07, + 0.915, + 0.354 + ], + "angle": 0, + "content": "paragraph, the test suite is developed following the plug-in architecture feature. This ensures that the user can easily integrate their algorithm into the test suite and test the accuracy, energy consumption, and CPU usage with or without security threats. Users can create their own IoT-SDN network and complexity in the network and generate any number of security breaching attacks. This approach not only simplifies the validation process of IDS models in a realistic network scenario but also encourages the exploration of innovative IDS methodologies by providing a solid foundation of tools and benchmarks. We have made the test suite available with the same configuration discussed in Section 4.4. We integrated the same tools for creating an IoT-SDN network, generating security attacks, and measuring IDS accuracy, energy consumption, CPU usage, etc. Through its design, the test suite aims to advance the development and thorough evaluation of cutting-edge IDS solutions, significantly enhancing network security in the era of SDN." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.375, + 0.838, + 0.391 + ], + "angle": 0, + "content": "5. Experimental Results and Analysis" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.397, + 0.915, + 0.485 + ], + "angle": 0, + "content": "This section discusses our experimental results and findings. After presenting our results, we conducted an in-depth statistical analysis using ANOVA. 
This analysis aims to illuminate the implications and insights that emerge from the experimental results, providing an understanding of the efficacy and nuances of each IDS under study." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.5, + 0.787, + 0.528 + ], + "angle": 0, + "content": "5.1. Experimental finding for RQ1 CPU Load:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.53, + 0.915, + 0.858 + ], + "angle": 0, + "content": "We tested ML-based IDSs under various cyberattack scenarios to assess their impact and strain on our testbed. The types of cyberattacks we considered include DDoS, DoS, brute force attacks, and the port scan. Moreover, we conducted the ANOVA focusing on CPU load variations in our testbed. Figure 2 illustrates a comparative analysis of the average CPU load among different ML-based IDS models in the presence of various types of cyberattacks. The DL-based IDS (CNN, LSTM, combined model of LSTM and CNN, and EIDM) consistently maintain lower CPU loads across all attack types, demonstrating their efficiency in resource utilization during inference. In contrast, traditional ML-based IDS such as KNN, DT, and RF exhibit significantly higher CPU loads, especially under brute force and DDoS attacks, with KNN and DT being the most resource-intensive. This is because DL models, such as CNN and LSTM, efficiently handle computations in parallel and are optimized for inference. In contrast, traditional models (e.g., KNN and DT) require more repeated, resource-heavy calculations, such as distance computations in KNN or recursive splitting in DTs, especially under large-scale attacks." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.861, + 0.658, + 0.875 + ], + "angle": 0, + "content": "Statistical Findings:" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.876, + 0.913, + 0.935 + ], + "angle": 0, + "content": "We conducted an ANOVA, and the results presented in Table 4 illuminate significant differences in CPU load among diverse ML-based IDS under DDoS, underscored by F-statistic of 60.40 and a p-value \\(< 0.05\\). This F-statistic delineates" + }, + { + "type": "footer", + "bbox": [ + 0.085, + 0.954, + 0.416, + 0.966 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.82, + 0.954, + 0.905, + 0.966 + ], + "angle": 0, + "content": "Page 9 of 21" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.069, + 0.753, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.354, + 0.741, + 0.37 + ], + "angle": 0, + "content": "Figure 2: The Average CPU load of ML-based IDS under cyber threats." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.395, + 0.479, + 0.423 + ], + "angle": 0, + "content": "Table 4 ANOVA results: CPU Load for ML-based IDS under DDoS." + }, + { + "type": "table", + "bbox": [ + 0.143, + 0.425, + 0.855, + 0.498 + ], + "angle": 0, + "content": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 21609.87 | 3601.64 | 60.40 | < 0.05
Within groups | 91 | 5426.49 | 59.63
Total | 97 | 27036.36 | 278.73
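The F-statistic and p-value reported in Table 4 follow from a standard one-way ANOVA across the seven IDS groups. A minimal sketch is shown below; the results file name and its column names are assumptions, while scipy's f_oneway is the textbook routine for this test.

```python
# Minimal sketch of the one-way ANOVA behind Table 4, assuming per-run CPU-load
# samples for each IDS model are stored in a CSV with "model" and "cpu_load" columns.
import pandas as pd
from scipy.stats import f_oneway

runs = pd.read_csv("cpu_load_ddos.csv")                 # hypothetical results file
groups = [g["cpu_load"].to_numpy() for _, g in runs.groupby("model")]

f_stat, p_value = f_oneway(*groups)                     # 7 groups -> 6 df between groups
print(f"F = {f_stat:.2f}, p = {p_value:.4g}")           # compare with Table 4
```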
" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.522, + 0.486, + 0.673 + ], + "angle": 0, + "content": "the contrast in CPU load variance across ML-based IDSs against the variance in, highlighting a significant influence of IDS selection on CPU load. The remarkably low p-value corroborates this finding, conclusively demonstrating the substantial differences in CPU load among the IDSs. Furthermore, we observed similar p-values \\((< 0.05)\\) across other attacks, including brute force, DoS, and the port scan, so we do not report them. This reinforces the presence of marked differences in CPU load among diverse ML-based IDS under different cyber threats." + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.683, + 0.166, + 0.699 + ], + "angle": 0, + "content": "Finding" + }, + { + "type": "text", + "bbox": [ + 0.107, + 0.71, + 0.462, + 0.921 + ], + "angle": 0, + "content": "DL-based IDS, such as CNN, LSTM, and hybrids, perform more efficiently in managing computational demands across diverse types of cyber threats than traditional ML-based IDS, such as KNN, DT, and RF, as they exhibit higher CPU loads at the edge. This pattern suggests that DL-based IDS' intrinsic efficiency is not attack-specific but rooted in their architecture, making them especially suited for real-time applications at edge gateway. These results are expected, as traditional ML-based IDS (e.g., KNN, DT, RF) perform computationally expensive operations during inference, unlike DL-based IDS, which optimizes processing through parallelization and learned feature extraction." + }, + { + "type": "title", + "bbox": [ + 0.537, + 0.522, + 0.627, + 0.537 + ], + "angle": 0, + "content": "CPU Usage:" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.538, + 0.916, + 0.747 + ], + "angle": 0, + "content": "Figure 3 compares the average CPU usage of various ML-based IDS models under different cyberattacks. The KNN model consistently exhibits the highest CPU usage across all attack types, indicating its high computational demand, which limits its use in resource-constrained environments. The RF and DT models are also CPU-bound, though they are less intensive than KNN. In contrast, the LSTM model demonstrates the lowest CPU usage, making it the most efficient option for scenarios where minimizing resource consumption is critical. The hybrid of the LSTM and CNN model, along with the CNN and EIDM models, offer a balance between inference accuracy and computational efficiency, making them viable choices for environments with moderate resource availability." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.748, + 0.66, + 0.762 + ], + "angle": 0, + "content": "Statistical Findings:" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.762, + 0.916, + 0.927 + ], + "angle": 0, + "content": "Table 5 presents our ANOVA results. Our results reveal significant differences in CPU load among diverse ML-based IDS under DDoS, as evidenced by a compelling F-statistic of 60.39 and a p-value \\(< 0.05\\). This F-statistic highlights the variance in CPU load across IDS groups compared to the variance in, underscoring a significant impact of IDS selection on CPU load. The exceedingly small p-value further supports this conclusion. Moreover, we observed similar p-values (below 0.05) across various cyber threats, such as brute force, DoS, and the port scan, so we do not report those results." 
+ }, + { + "type": "footer", + "bbox": [ + 0.083, + 0.952, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.818, + 0.952, + 0.914, + 0.967 + ], + "angle": 0, + "content": "Page 10 of 21" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.07, + 0.755, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.251, + 0.354, + 0.745, + 0.37 + ], + "angle": 0, + "content": "Figure 3: The Average CPU usage of ML-based IDS under cyber threats." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.395, + 0.485, + 0.423 + ], + "angle": 0, + "content": "Table 5 ANOVA results: CPU Usage for ML-based IDS under DDoS." + }, + { + "type": "table", + "bbox": [ + 0.143, + 0.427, + 0.855, + 0.5 + ], + "angle": 0, + "content": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 21609.86 | 3601.64 | 60.39 | < 0.05
Within groups | 91 | 5426.49 | 59.62
Total | 97 | 27036.36 | 278.73
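The CPU usage and load traces underlying Tables 4 and 5 were recorded on the Raspberry Pi gateways; the paper does not name its sampling tool, so the snippet below is only one plausible way such traces could be gathered, using psutil and a fixed one-second interval (both assumptions), logging to a hypothetical CSV file.

```python
# Minimal sketch: sample system-wide CPU usage (%) and the 1-minute load average
# while an IDS model runs. psutil, the 1 s interval, and the file name are assumptions.
import csv, os, time
import psutil

with open("cpu_trace.csv", "w", newline="") as fh:      # hypothetical output file
    writer = csv.writer(fh)
    writer.writerow(["timestamp", "cpu_percent", "load_1min"])
    for _ in range(300):                                 # e.g., a 5-minute window
        usage = psutil.cpu_percent(interval=1.0)         # blocks ~1 s per sample
        load1, _, _ = os.getloadavg()                    # runnable/waiting processes
        writer.writerow([time.time(), usage, load1])
```

On the quad-core Raspberry Pi 4, a load average of 1.0 corresponds to roughly 25% of total capacity, as noted in Section 4.5.2, so usage and load should be interpreted together.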
" + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.526, + 0.168, + 0.54 + ], + "angle": 0, + "content": "Finding" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.552, + 0.463, + 0.734 + ], + "angle": 0, + "content": "Our analysis reveals that traditional ML-based IDS such as KNN, DT, and RF exhibit increased CPU usage under various cyber threats, thus posing challenges for the edge. Also, LSTM and other DL-based IDS exhibit lower CPU demands. This consistent efficiency across various attacks highlights the benefit of adopting DL-based IDS at the edge gateway. The increased CPU usage of KNN, DT, and RF reflects their reliance on instance-based and tree-splitting operations, which require repeated evaluations. In contrast, DL models efficiently process data in structured layers, reducing computational strain." + }, + { + "type": "title", + "bbox": [ + 0.106, + 0.751, + 0.267, + 0.765 + ], + "angle": 0, + "content": "Energy consumption:" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.766, + 0.486, + 0.916 + ], + "angle": 0, + "content": "Figure 4 shows that the LSTM and DT models are the most energy-efficient across different types of cyberattacks, consistently exhibiting the lowest energy consumption. The CNN model also performs efficiently, with slightly higher energy usage. The LSTM, CNN model hybrid, and EIDM have moderate energy consumption, balancing complexity and efficiency. In contrast, the KNN model has the highest energy consumption across all scenarios, making it less suitable for energy-constrained environments. The RF model falls in between, with moderate energy demands." + }, + { + "type": "title", + "bbox": [ + 0.536, + 0.523, + 0.684, + 0.538 + ], + "angle": 0, + "content": "Statistical Findings:" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.539, + 0.917, + 0.78 + ], + "angle": 0, + "content": "We conducted the ANOVA, and the results presented in Table 6 reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by F-statistic of 57.44 and a p-value of \\(< 0.05\\). This F-statistic delineates the contrast in energy consumption variance across the group of IDSs against the variance in, highlighting a significant influence of IDS selection on energy consumption. The extremely low p-value further supports this conclusion, conclusively demonstrating the substantial differences in energy consumption among the IDSs. In addition, we observed similar p-values (\\(< 0.05\\)) for other cyber threats, such as brute force, DoS, and the port scan, so we do not report the results. This observation demonstrates significant differences in energy consumed among various ML-based IDS when faced with differing cyber threats." + }, + { + "type": "footer", + "bbox": [ + 0.083, + 0.952, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.819, + 0.953, + 0.914, + 0.967 + ], + "angle": 0, + "content": "Page 11 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.07, + 0.758, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.352, + 0.776, + 0.368 + ], + "angle": 0, + "content": "Figure 4: The Average Energy consumption of ML-based IDS under cyber threats." 
+ }, + { + "type": "table_caption", + "bbox": [ + 0.081, + 0.393, + 0.543, + 0.421 + ], + "angle": 0, + "content": "Table 6 ANOVA results: energy consumption for ML-based IDS under DDoS." + }, + { + "type": "table", + "bbox": [ + 0.143, + 0.425, + 0.857, + 0.497 + ], + "angle": 0, + "content": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 47732.07 | 7955.34 | 57.44 | < 0.05
Within groups | 98 | 13571.72 | 138.48
Total | 104 | 61303.80 | 589.45
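Section 4.5.3 states that the energy figures were obtained with PowerTop. As a rough illustration, the experiment driver could invoke PowerTop's CSV reporting mode once per measurement window, as sketched below; the command-line flags, file naming, and the need for root privileges should be verified against the PowerTop release on the target image, and the 15 repetitions mirror the test count described in Section 4.5.5.

```python
# Minimal sketch: drive PowerTop from Python to log one CSV report per
# measurement window while an IDS experiment runs (typically requires root).
# The --csv/--time flags and file naming are assumptions to confirm locally.
import subprocess

def powertop_report(out_path: str, seconds: int = 60) -> None:
    subprocess.run(
        ["powertop", f"--csv={out_path}", f"--time={seconds}"],
        check=True,
    )

for run in range(15):                                   # 15 repetitions per model
    powertop_report(f"powertop_run_{run:02d}.csv")
```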
" + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.523, + 0.169, + 0.538 + ], + "angle": 0, + "content": "Finding" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.55, + 0.465, + 0.76 + ], + "angle": 0, + "content": "Our analysis concludes a marked discrepancy in energy consumption, with traditional ML-based IDS such as KNN, RF, and DT exhibiting significantly higher energy consumption under cyber threats such as DDoS and brute force, a drawback for energy-constrained at the edge. In contrast, DL-based IDS models, LSTM, CNN, EIDM, and their hybrids excel in energy efficiency, making them the preferable choice for the edge. Traditional ML models' higher energy consumption results from their iterative computations and lack of optimized inference paths, making them less viable for real-time IoT applications where power efficiency is crucial." + }, + { + "type": "title", + "bbox": [ + 0.081, + 0.78, + 0.363, + 0.796 + ], + "angle": 0, + "content": "5.2. Experimental finding for RQ2" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.797, + 0.486, + 0.84 + ], + "angle": 0, + "content": "This section presents our experimental results for IoT-edge devices with SDN integration during real-time cyber threats." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.841, + 0.169, + 0.854 + ], + "angle": 0, + "content": "CPU Load:" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.856, + 0.486, + 0.933 + ], + "angle": 0, + "content": "In Figure 5, we illustrate the CPU load of various ML-based IDS models under different cyberattacks in an SDN-enabled at the edge gateway. The analysis shows that KNN and DT models have the highest CPU load, especially during DDoS and DoS, indicating significant resource demands at" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.521, + 0.916, + 0.626 + ], + "angle": 0, + "content": "the edge. Conversely, the LSTM model demonstrates the lowest CPU load, highlighting its efficiency in resource management. The CNN model also performs efficiently but not as well as LSTM. The LSTM and CNN model hybrid, similar to EIDM, offers balanced performance, making them suitable for scenarios where moderate CPU efficiency is required at the edge." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.627, + 0.66, + 0.641 + ], + "angle": 0, + "content": "Statistical Findings:" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.642, + 0.917, + 0.837 + ], + "angle": 0, + "content": "We conducted an ANOVA for the case of the DDoS attack, and the results are presented in Table 7. The results reveal significant differences in CPU load among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 142.57 and a p-value of \\(< 0.05\\). This F-statistic highlights the variance in CPU load across IDSs compared to the variance in them, indicating a significant impact of IDS selection on CPU load. In addition, consistent p-values \\((< 0.05)\\) were observed across other cyber threats, including brute force, DoS, and the port scan, and we do not report the result. This reinforces the presence of marked differences in CPU load among diverse ML-based IDS when subjected to different cyber threats." 
+ }, + { + "type": "footer", + "bbox": [ + 0.083, + 0.953, + 0.417, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.818, + 0.953, + 0.914, + 0.967 + ], + "angle": 0, + "content": "Page 12 of 21" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.07, + 0.761, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.258, + 0.35, + 0.741, + 0.366 + ], + "angle": 0, + "content": "Figure 5: The Average CPU load of ML-based IDS under cyber threats." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.392, + 0.524, + 0.418 + ], + "angle": 0, + "content": "Table 7 ANOVA results: CPU load for ML-based IDS in SDN under DDoS." + }, + { + "type": "table", + "bbox": [ + 0.143, + 0.421, + 0.855, + 0.493 + ], + "angle": 0, + "content": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 1184.21 | 197.36 | 142.57 | < 0.05
Within groups | 91 | 125.97 | 1.38
Total | 97 | 1310.18 | 13.50
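The SDN results in this subsection come from the Mininet topology attached to the Ryu controller described in Section 4.4. The sketch below shows a simplified version of such a setup (two switches and four hosts rather than the six switches and eighteen hosts used in the study); the controller address, OpenFlow port, and link parameters are assumptions.

```python
# Minimal sketch of a Mininet topology attached to a remote Ryu controller.
# Simplified topology; run with root privileges and a Ryu instance already listening.
from mininet.net import Mininet
from mininet.node import RemoteController, OVSKernelSwitch
from mininet.link import TCLink
from mininet.cli import CLI

net = Mininet(controller=RemoteController, switch=OVSKernelSwitch, link=TCLink)
net.addController("c0", ip="127.0.0.1", port=6653)       # Ryu assumed local

s1, s2 = net.addSwitch("s1"), net.addSwitch("s2")
hosts = [net.addHost(f"h{i}") for i in range(1, 5)]
net.addLink(s1, s2)
for i, h in enumerate(hosts):
    net.addLink(h, s1 if i < 2 else s2)

net.start()
CLI(net)        # interactive prompt for launching traffic or attack scripts
net.stop()
```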
" + }, + { + "type": "title", + "bbox": [ + 0.109, + 0.519, + 0.167, + 0.535 + ], + "angle": 0, + "content": "Finding" + }, + { + "type": "text", + "bbox": [ + 0.107, + 0.545, + 0.462, + 0.711 + ], + "angle": 0, + "content": "The findings demonstrate that traditional ML-based IDS, e.g., DT, exhibit elevated loads under DDoS and DoS. In contrast, DL-based IDSs, including EIDM, LSTM, CNN, and their hybrids, demonstrate superior energy efficiency, making them suitable for SDN-enabled at the edge gateway. The integration of SDN helps balance network resource allocation. Yet, traditional ML-based IDS still exhibit higher CPU load due to their design, reinforcing the efficiency advantage of DL-based models in dynamic network environments." + }, + { + "type": "title", + "bbox": [ + 0.107, + 0.727, + 0.198, + 0.741 + ], + "angle": 0, + "content": "CPU Usage:" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.742, + 0.486, + 0.863 + ], + "angle": 0, + "content": "Figure 6 shows that CPU usage across various ML-based IDS models in an SDN-enabled edge gateway is fairly consistent across different attack scenarios. Only minor variations are observed, as CNN, LSTM, and hybrid versions demonstrate relatively lower CPU usage, indicating efficient resource management. The DT, KNN, and RF models also show consistent CPU usage across attacks. The EIDM model balances efficiency and performance well." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.863, + 0.231, + 0.877 + ], + "angle": 0, + "content": "Statistical Findings:" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.878, + 0.486, + 0.924 + ], + "angle": 0, + "content": "We conducted an ANOVA for the results we got for ML-based IDS in SDN under the DDoS attack. The results presented in Table 8 reveal significant differences in CPU" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.517, + 0.918, + 0.683 + ], + "angle": 0, + "content": "usage among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 5.94 and a p-value of \\(< 0.05\\). This F-statistic highlights the variance in CPU usage across the group of IDSs compared to the variance in, indicating a significant impact of IDS selection on CPU usage. In addition, we observed a consistently low p-value \\((< 0.05)\\) for other examined cyber threats (not reported in the paper), including brute force, DoS, and port scan, reinforcing the presence of marked differences in CPU usage among diverse ML-based IDS when subjected to different cyber threats." + }, + { + "type": "footer", + "bbox": [ + 0.083, + 0.952, + 0.417, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.818, + 0.953, + 0.914, + 0.967 + ], + "angle": 0, + "content": "Page 13 of 21" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.07, + 0.753, + 0.329 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.251, + 0.354, + 0.745, + 0.37 + ], + "angle": 0, + "content": "Figure 6: The Average CPU usage of ML-based IDS under cyber threats." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.395, + 0.536, + 0.423 + ], + "angle": 0, + "content": "Table 8 ANOVA results: CPU usage for ML-based IDS in SDN under DDoS." 
+ }, + { + "type": "table", + "bbox": [ + 0.143, + 0.427, + 0.857, + 0.5 + ], + "angle": 0, + "content": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 27.97 | 4.66 | 5.94 | < 0.05
Within groups | 91 | 71.32 | 0.78
Total | 97 | 99.30 | 1.02
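The paper does not describe the controller application used in the SDN testbed, so the skeleton below is purely illustrative: a standard Ryu OpenFlow 1.3 app that installs a table-miss rule and logs packet-in metadata, which is the kind of hook through which traffic features could be exposed to an IDS process. Class and handler names are hypothetical.

```python
# Illustrative Ryu application skeleton (not the paper's controller app).
from ryu.base import app_manager
from ryu.controller import ofp_event
from ryu.controller.handler import CONFIG_DISPATCHER, MAIN_DISPATCHER, set_ev_cls
from ryu.ofproto import ofproto_v1_3


class IdsTap(app_manager.RyuApp):
    OFP_VERSIONS = [ofproto_v1_3.OFP_VERSION]

    @set_ev_cls(ofp_event.EventOFPSwitchFeatures, CONFIG_DISPATCHER)
    def on_switch_features(self, ev):
        dp = ev.msg.datapath
        parser, ofp = dp.ofproto_parser, dp.ofproto
        # Table-miss flow: send unmatched packets to the controller.
        actions = [parser.OFPActionOutput(ofp.OFPP_CONTROLLER, ofp.OFPCML_NO_BUFFER)]
        inst = [parser.OFPInstructionActions(ofp.OFPIT_APPLY_ACTIONS, actions)]
        dp.send_msg(parser.OFPFlowMod(datapath=dp, priority=0,
                                      match=parser.OFPMatch(), instructions=inst))

    @set_ev_cls(ofp_event.EventOFPPacketIn, MAIN_DISPATCHER)
    def on_packet_in(self, ev):
        msg = ev.msg
        # Hand packet metadata to the IDS pipeline here (placeholder).
        self.logger.info("packet-in: %s bytes from datapath %s",
                         msg.total_len, msg.datapath.id)
```

Such an app would be started with `ryu-manager` on the controller host before launching the Mininet topology.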
" + }, + { + "type": "title", + "bbox": [ + 0.11, + 0.526, + 0.169, + 0.54 + ], + "angle": 0, + "content": "Finding" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.552, + 0.465, + 0.779 + ], + "angle": 0, + "content": "In the context of SDN-enhanced IoT, deploying DL-based IDS with advanced models such as CNN, LSTM, EIDM, and their hybrids demonstrates efficient energy consumption. These models achieve reduced CPU usage against brute force and port scan, benefiting from the centralized resource optimization afforded by SDN. Nonetheless, the complexity of DDoS and DoS presents a significant challenge, necessitating increased computational resources. Although SDN optimizes network operations, IDS models such as KNN and RF remain resource-intensive due to their frequent computational overhead. At the same time, DL-based IDS maintains efficiency through batch processing and learned representations." + }, + { + "type": "title", + "bbox": [ + 0.107, + 0.796, + 0.267, + 0.81 + ], + "angle": 0, + "content": "Energy consumption:" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.811, + 0.486, + 0.932 + ], + "angle": 0, + "content": "Figure 7 depicts the average energy consumption of ML-based IDS models under different attacks in an SDN environment. The results indicate that traditional ML models consume more energy, especially during port scans, e.g., DT, KNN, and RF. In contrast, the EIDM model consistently shows lower energy consumption across all attack types, highlighting its efficiency. The LSTM and CNN models display moderate energy usage, including their hybrid" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.524, + 0.916, + 0.584 + ], + "angle": 0, + "content": "version. Compared to non-SDN environments, the increased energy consumption in the SDN setup is attributed to the SDN controller's active role in traffic management and threat response, which demands more energy resources." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.584, + 0.66, + 0.599 + ], + "angle": 0, + "content": "Statistical Findings:" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.599, + 0.917, + 0.81 + ], + "angle": 0, + "content": "We applied ANOVA on energy consumption data across ML-based IDSs in SDN under DDoS. The results, presented in Table 9, reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by an impressive F-statistic of 18.27 and a p-value of \\(< 0.05\\). This F-statistic highlights the variance in energy consumption across a group of IDSs compared to the variance in, indicating a significant impact of IDS selection on energy consumption. Moreover, a consistently low p-value (\\(< 0.05\\)) was observed across other cyber threats, including brute force, DoS, and port scan, so we do not report the results here. This highlights marked differences in CPU usage among diverse ML-based IDS when subjected to examined cyber threats." 
+ }, + { + "type": "footer", + "bbox": [ + 0.083, + 0.953, + 0.417, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.819, + 0.953, + 0.914, + 0.967 + ], + "angle": 0, + "content": "Page 14 of 21" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.286, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "image", + "bbox": [ + 0.212, + 0.07, + 0.758, + 0.328 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.221, + 0.352, + 0.776, + 0.368 + ], + "angle": 0, + "content": "Figure 7: The Average Energy consumption of ML-based IDS under cyber threats." + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.393, + 0.596, + 0.421 + ], + "angle": 0, + "content": "Table 9 ANOVA results: Energy consumption for ML-based IDS in SDN under DDoS." + }, + { + "type": "table", + "bbox": [ + 0.143, + 0.425, + 0.857, + 0.497 + ], + "angle": 0, + "content": "
Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value
Between groups | 6 | 1263.26 | 210.54 | 18.27 | < 0.05
Within groups | 91 | 1048.21 | 11.51
Total | 97 | 2311.48 | 23.82
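The cyber threats evaluated here (Section 4.5.4) were generated from Kali Linux, but the specific tools are not named in the paper. The sketch below shows one common way such traffic could be launched and is only illustrative: hping3, nmap, and hydra, the target address, the credentials, and the wordlist are all assumptions, and such traffic must only ever be directed at one's own testbed.

```python
# Illustrative attack-traffic launcher for a private testbed (tools/flags assumed;
# the paper only states that Kali Linux was used to simulate the threats).
import subprocess

TARGET = "10.0.0.10"   # hypothetical edge-gateway address inside the testbed

ATTACKS = {
    # SYN flood from a single source (DoS-style)
    "dos":        ["hping3", "-S", "--flood", "-p", "80", TARGET],
    # SYN flood with spoofed random sources (DDoS-style from one box)
    "ddos":       ["hping3", "-S", "--flood", "--rand-source", "-p", "80", TARGET],
    # TCP SYN scan of the well-known ports
    "portscan":   ["nmap", "-sS", "-p", "1-1024", TARGET],
    # SSH password brute force with a wordlist
    "bruteforce": ["hydra", "-l", "pi", "-P", "wordlist.txt", f"ssh://{TARGET}"],
}

def launch(name: str) -> subprocess.Popen:
    """Start the named attack in the background so metrics can be sampled."""
    return subprocess.Popen(ATTACKS[name])
```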
" + }, + { + "type": "title", + "bbox": [ + 0.109, + 0.522, + 0.166, + 0.538 + ], + "angle": 0, + "content": "Finding" + }, + { + "type": "text", + "bbox": [ + 0.107, + 0.55, + 0.462, + 0.851 + ], + "angle": 0, + "content": "The findings accentuate the distinct energy efficiency profiles of ML-based IDSs when exposed to various cyber threat scenarios. During brute force and the port scan, traditional ML-based IDS such as DT, KNN, and RF are observed to have higher energy consumption. This indicates that these models are not energy-efficient under the examined conditions due to their complex computational frameworks. On the other hand, DL-based IDS and the EIDM show markedly superior energy efficiency. The reduced energy footprint of DL-based IDS is especially advantageous in the context of the SDN-enabled at the edge, where low energy consumption is crucial due to device constraints and the need for long-term, autonomous operation. The reduction in energy consumption observed in DL-based IDS when integrated with SDN highlights the benefits of centralized network control and optimized workload distribution, making them a more sustainable choice for IoT security." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.52, + 0.907, + 0.567 + ], + "angle": 0, + "content": "5.3. Analyzing the Impact of SDN on CPU Usage, Load, and Energy Efficiency in ML-Based IDS" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.569, + 0.918, + 0.766 + ], + "angle": 0, + "content": "Figure 8 demonstrates that integrating SDN with ML-based IDS in the edge gateway significantly improves resource efficiency, reducing energy consumption, CPU usage, and CPU load. The most substantial improvement is in CPU usage, where DL-based IDS, e.g., LSTM and CNN, outperform traditional ML models by efficiently handling complex computations through parallel processing. Additionally, SDN integration reduces CPU load by balancing workloads, essential for real-time threat detection in edge gateway. The observed reduction in energy consumption further highlights the approach's suitability for battery-powered edge gateway, confirming its scalability and practicality for real-world applications." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.787, + 0.882, + 0.821 + ], + "angle": 0, + "content": "6. ML-Based IDS vs. Signature-Based IDS (Snort)" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.826, + 0.917, + 0.933 + ], + "angle": 0, + "content": "This section compares our ML-based IDS models and the signature-based Snort IDS to evaluate the performance improvements achieved by leveraging ML-based IDS over traditional detection systems. This comparison is essential to highlight the advantages of ML-based approaches regarding resource efficiency, scalability, and adaptability, especially in edge gateway." 
+ }, + { + "type": "footer", + "bbox": [ + 0.083, + 0.952, + 0.417, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.818, + 0.953, + 0.914, + 0.967 + ], + "angle": 0, + "content": "Page 15 of 21" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.286, + 0.043, + 0.714, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "image", + "bbox": [ + 0.237, + 0.069, + 0.761, + 0.29 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.084, + 0.297, + 0.916, + 0.326 + ], + "angle": 0, + "content": "Figure 8: Reduction in energy consumption, CPU usage, and CPU load for ML-based IDS models with SDN integration in edge gateway." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.359, + 0.485, + 0.403 + ], + "angle": 0, + "content": "The results presented in Table 10 provide a comparative analysis of our ML-based IDS models against the signature-based Snort IDS discussed in other research." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.405, + 0.486, + 0.81 + ], + "angle": 0, + "content": "Regarding CPU usage, Snort IDS shows high utilization under heavy traffic due to its reliance on predefined rules and signature matching. In contrast, the ML-based IDS models demonstrate better CPU efficiency. While traditional ML models, e.g., DT and KNN, have higher CPU usage because of iterative computations, DL-based IDS, e.g., LSTM, CNN, and a hybrid of LSTM and CNN, EIDM exhibits lower CPU usage. This is primarily due to DL-based IDS's ability to process data in batches and leverage parallel processing for real-time threat detection. For energy consumption, Table 10 shows that Snort IDS consumes more energy, especially in IoT networks requiring multiple containers. However, our ML-based IDS models, especially DL architectures, e.g., LSTM and EIDM, demonstrate superior energy efficiency. These models optimize resource usage and process data efficiently, making them suitable for resource-constrained edge gateway and highlighting their scalability advantages. Finally, in terms of CPU load, Table 10 indicates that earlier versions of Snort IDS suffer from high CPU load on a single core because of their single-threaded architecture. Although newer versions introduce multi-threading, they still encounter processing bottlenecks under heavy traffic. Conversely, the ML-based IDS models distribute the CPU load more effectively across multiple cores. DL-based IDS, especially LSTM and hybrid architectures, achieve the lowest CPU load levels due to their parallel execution capabilities and efficient handling of sequential data." + }, + { + "type": "title", + "bbox": [ + 0.085, + 0.833, + 0.202, + 0.848 + ], + "angle": 0, + "content": "7. Discussion" + }, + { + "type": "text", + "bbox": [ + 0.085, + 0.855, + 0.485, + 0.929 + ], + "angle": 0, + "content": "Our investigations explored the performance metrics of ML-based IDS with various models, especially in IoT-edge devices with and without SDN integration. Our study was primarily evaluating the impact of these models on CPU load, CPU usage, and energy consumption amidst diverse" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.357, + 0.916, + 0.93 + ], + "angle": 0, + "content": "cyberattack scenarios. 
The empirical findings revealed significant disparities in resource utilization across different ML-based IDS, shedding light on crucial aspects of their deployment in IoT devices integrated with SDN. The KNN, DT, and RF significantly exhibited higher CPU load, CPU usage, and energy consumption, especially under specific types of cyberattacks. While these models are adept at identifying threats, their resource-intensive nature could pose challenges in the IoT context, where computational resources are often limited. This could lead to diminished performance or instability in environments with constrained resources. Specifically, KNN's higher variance in CPU load and energy consumption, as observed in Tables 4 and 5, stems from its lazy learning approach. Unlike other models, KNN does not build a generalized model during training but instead stores the entire dataset and computes distances at query time. This results in increased processing demands, leading to fluctuations in resource utilization. Such behavior makes KNN less suitable for real-time IDS applications in resource-constrained IoT networks[72] [73]. While CPU load significantly impacts energy consumption, it is not the sole factor. Memory operations, network activity, peripheral devices, and thermal management also contribute to power usage in IoT devices. High data transmission rates and active sensors can increase energy demands, while sustained CPU load may trigger additional energy consumption for cooling mechanisms. Although a strong correlation between CPU load and energy consumption is expected, these factors introduce variations across IDS models. Optimizing IDS efficiency can help balance security and resource constraints in IoT networks. Conversely, the CNN and LSTM models demonstrated greater efficiency in resource utilization. While their architectures are sophisticated and adept at processing complex data structures, they appear to optimize the computational load during inference when employed in IDS. This makes them more suitable for scenarios where resource conservation is critical. However, the complexity of these models introduces its own set of challenges, especially" + }, + { + "type": "footer", + "bbox": [ + 0.085, + 0.954, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.819, + 0.954, + 0.913, + 0.967 + ], + "angle": 0, + "content": "Page 16 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "table_caption", + "bbox": [ + 0.082, + 0.074, + 0.577, + 0.101 + ], + "angle": 0, + "content": "Table 17 Comparative Resource Utilization of ML-Based IDS and Snort IDS Based" + }, + { + "type": "table", + "bbox": [ + 0.108, + 0.105, + 0.891, + 0.458 + ], + "angle": 0, + "content": "
Metric | Snort IDS | ML-Based IDS (Our Findings)
CPU Usage | - High Traffic Conditions: CPU usage can reach its maximum during initialization with many active rules [67].\n- Multi-Core Systems: Snort 3.0 utilizes a significant portion of CPU resources on a multi-core processor [68] [69]. | - Traditional ML Models (DT, KNN, RF): Tend to exhibit higher CPU usage during real-time cyber threats, especially those requiring intensive computations.\n- DL-Based Models (CNN, LSTM, Hybrid of LSTM and CNN, and EIDM): Show lower CPU usage compared to traditional ML models, with LSTM models demonstrating the most efficient utilization due to sequential data processing and parallelization.
Energy Consumption | - IoT Deployment: Deployment of Snort on IoT gateways results in considerable energy consumption [70]. | - Traditional ML-based IDS: Generally consume more energy during inference cycles due to repetitive computations.\n- DL-Based Models: Exhibit better energy efficiency, especially models that combine convolutional and sequential layers, benefiting from optimized processing structures.
CPU Load | - Single-Core Utilization: Older Snort versions (pre-3.0) lead to high load on a single core under heavy traffic [71].\n- Multi-Core Systems: Updated versions distribute the load but still face processing bottlenecks under extensive traffic [71]. | - Traditional ML-based IDS: Often show higher CPU load during complex attack scenarios.\n- DL-Based Models: Maintain a lower CPU load, benefiting from parallel processing capabilities, with hybrid models showing the most balanced load distribution.
" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.481, + 0.486, + 0.51 + ], + "angle": 0, + "content": "in terms of training and ongoing maintenance in the dynamic landscape of IoT devices integrated with SDN." + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.512, + 0.487, + 0.737 + ], + "angle": 0, + "content": "The balance between detection efficiency and resource consumption is especially critical at edge gateway, where devices often have limited processing power and energy reserves. This balance is closely tied to several United Nations Sustainable Development Goals (SDGs), especially SDG 9 (Industry, Innovation, and Infrastructure), SDG 11 (Sustainable Cities and Communities), and SDG 13 (Climate Action). Optimizing IDS deployment in smart cities strengthens cybersecurity infrastructure, directly supporting SDG 9 while fostering resilient, sustainable urban environments in line with SDG 11. Furthermore, by prioritizing energy-efficient IDS solutions, this research contributes to SDG 13, promoting responsible resource consumption and mitigating the environmental impact of growing IoT networks [74]." + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.737, + 0.487, + 0.932 + ], + "angle": 0, + "content": "To aid IoT developers in selecting appropriate IDS solutions, we provide detailed guidelines in Table 11 and Table 12, outlining the performance trade-offs of seven different ML-based IDS models for IoT devices examined in this paper, both with and without SDN integration. These insights enable developers to make informed decisions, ensuring the optimal balance between security and resource efficiency during application development. We use graphical indicators (smiley faces) instead of numerical values to provide an intuitive, high-level comparison of IDS performance. This visual approach simplifies decision-making for IoT developers, aligning with similar methodologies used in prior work [75]. Moreover, all corresponding numerical values" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.481, + 0.915, + 0.51 + ], + "angle": 0, + "content": "related to CPU usage, CPU load, and energy consumption are presented in the Figures and Tables in Section 5." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.512, + 0.917, + 0.797 + ], + "angle": 0, + "content": "On the other hand, to the best of our knowledge, only Tekin et al. [12] have explored a similar direction in evaluating the performance of ML-based IDS in IoT systems. However, our study takes a fundamentally different approach, especially in how computational resources are classified and utilized, which plays a critical role in the effectiveness and scalability of IoT systems. While Tekin et al. focus on energy consumption and inference times using Raspberry Pi as an IoT device, our study emphasizes the advantages of processing data at the edge, especially regarding energy efficiency, CPU load, and usage. We show how models such as DT and RF benefit from edge processing, reducing latency and improving responsiveness, especially when combined with SDN, which optimizes network traffic and resource allocation. Our findings underscore the importance of balancing computational tasks across the network using SDN to maintain performance, unlike Tekin et al. [12], who do not explore the impact of edge computing or SDN integration." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.819, + 0.707, + 0.836 + ], + "angle": 0, + "content": "8. 
Threat and validity" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.841, + 0.917, + 0.903 + ], + "angle": 0, + "content": "Empirical research inevitably encounters issues related to the validity of findings. In light of this, the present section seeks to identify and discuss possible threats to our research's validity, per the recommendations of Wohlin et al. [76]." + }, + { + "type": "footer", + "bbox": [ + 0.083, + 0.952, + 0.417, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.818, + 0.952, + 0.915, + 0.968 + ], + "angle": 0, + "content": "Page 17 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.073, + 0.495, + 0.101 + ], + "angle": 0, + "content": "Table 11 Guideline for selecting seven ML-based IDS in edge gateway." + }, + { + "type": "table", + "bbox": [ + 0.18, + 0.105, + 0.818, + 0.169 + ], + "angle": 0, + "content": "
MetricDTKNNRFCNNLSTMLSTM+CNNEIDM
CPU load
CPU usage
Energy consumption
" + }, + { + "type": "table_caption", + "bbox": [ + 0.085, + 0.188, + 0.531, + 0.215 + ], + "angle": 0, + "content": "Table 12 Guideline for selecting seven ML-based IDS in SDN-edge gateway." + }, + { + "type": "table", + "bbox": [ + 0.18, + 0.219, + 0.818, + 0.282 + ], + "angle": 0, + "content": "
MetricDTKNNRFCNNLSTMLSTM+CNNEIDM
CPU load
CPU usage
Energy consumption
" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.284, + 0.868, + 0.299 + ], + "angle": 0, + "content": "The energy consumption and CPU usage in all ML-based IDS lowered during the brute force attack and port scan." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.323, + 0.254, + 0.338 + ], + "angle": 0, + "content": "8.1. Internal Threats" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.34, + 0.486, + 0.655 + ], + "angle": 0, + "content": "During our empirical study on ML-based IDS in the context of IoT devices with IoT devices integrated with SDN, we recognized the existence of internal obstacles that impact the credibility of our findings. The precision of our performance measures is of utmost importance, namely the measurement of CPU load, CPU usage, and energy consumption in these intricate network settings. The complex characteristics of IoT devices and the adaptable structure of SDN provide significant difficulties in guaranteeing accurate and dependable performance evaluations. To address these concerns, we performed fifteen experiments on our testbeds. To improve the trustworthiness of our results in the context of SDN and IoT, we utilized average values to reduce the impact of network or hardware differences and ambient factors. In addition, the cyber threat simulations were conducted using highly practiced cyber security testing mechanisms in academic research and industries in IoT-edge devices integrated with SDN. This work aims to tackle internal risks associated with the setup and precision of ML-based IDS, improving their usefulness and significance in these fast-advancing technical fields." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.668, + 0.264, + 0.683 + ], + "angle": 0, + "content": "8.2. External Threats:" + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.684, + 0.486, + 0.925 + ], + "angle": 0, + "content": "The landscape of network security, especially in IoT-edge devices and IoT-edge devices integrated with SDN realms, is increasingly challenged by external threats. These range from sophisticated cyberattacks such as DoS, DDoS, and brute force attacks to more subtle, yet equally harmful, reconnaissance methods such as a port scan. These threats highlight the urgent need for robust and adaptable IDS solutions. Integrating ML into IDS presents promising advancements in threat detection and mitigation. However, this integration faces challenges due to the complexity of IoT-edge devices, which are marked by numerous interconnected devices, and the dynamic nature of SDN architectures. IDS solutions must be precise in threat detection while also being resource-efficient. Our research evaluates ML-based IDS based on CPU usage, CPU load, and energy consumption, especially under real-time cyber threats. These metrics are" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.324, + 0.916, + 0.503 + ], + "angle": 0, + "content": "vital to ensure that ML-based IDS are effective in protecting networks against external threats and sustainable in their operation. They help maintain a crucial balance between security and performance in the complex ecosystems of IoT devices and IoT devices integrated with SDN. Additionally, to ensure the transparency and reproducibility of our study, we have provided detailed information about the experimental setup and made our testbed and results publicly available for further research [77]. 
By adopting these measures, we have attempted to provide robust validation and increase the inability to reject our findings among practitioners and researchers." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.524, + 0.636, + 0.539 + ], + "angle": 0, + "content": "9. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.546, + 0.916, + 0.936 + ], + "angle": 0, + "content": "This paper presents a comparative analysis of the ML-based IDS in IoT-edge devices and IoT-edge devices integrated with SDN under different cyberattack scenarios, resulting in comprehension. In IoT systems, conventional ML models (e.g., KNN and DT) often experience increased CPU load and CPU usage, especially when subjected to DoS and DDoS cyber threats. This suggests that these models have limits in resource-limited situations. In contrast, DL-based IDS (e.g., CNN and LSTM) exhibit reduced CPU usage, indicating improved efficiency and compatibility with IoT security. A consistent energy consumption pattern was identified across attack types in both scenarios, encompassing advanced neural networks and conventional methods. The consistent energy efficiency of these models, independent of their computing complexity, highlights their efficacy and long-term viability for use in different network environments. The findings emphasize the significance of choosing ML-based IDS according to their computational efficiency and energy consumption to achieve optimal performance in networks with limited resources. It is imperative to thoroughly evaluate the scalability and robustness of ML-based IDS in future research, especially in more significant and more complex network environments. This assessment will explain their ability to adjust to changing cyber threats. Furthermore, it is crucial to evaluate the influence of new technologies, e.g., 5G and edge computing, on the efficacy" + }, + { + "type": "footer", + "bbox": [ + 0.085, + 0.954, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.819, + 0.954, + 0.913, + 0.967 + ], + "angle": 0, + "content": "Page 18 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.069, + 0.486, + 0.099 + ], + "angle": 0, + "content": "and suitability of ML-based IDS in advanced network infrastructures." + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.1, + 0.487, + 0.234 + ], + "angle": 0, + "content": "Future research directions should pivot towards optimizing ML-based IDS for enhanced scalability, real-time processing, and energy consumption. The overarching challenge is to develop effective threat detection models that minimally impact system resources. Furthermore, integrating these models into existing IoT devices and IoT devices integrated with SDN infrastructures presents additional challenges, including ensuring compatibility, scalability, and ease of maintenance." + }, + { + "type": "title", + "bbox": [ + 0.082, + 0.255, + 0.277, + 0.27 + ], + "angle": 0, + "content": "A. Conflict of interest" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.277, + 0.487, + 0.322 + ], + "angle": 0, + "content": "The authors declare that they have no known conflict of interest or personal relationships that could have appeared to influence the work reported in this paper." 
+ }, + { + "type": "title", + "bbox": [ + 0.082, + 0.344, + 0.276, + 0.361 + ], + "angle": 0, + "content": "B. Acknowledgement" + }, + { + "type": "text", + "bbox": [ + 0.081, + 0.366, + 0.486, + 0.395 + ], + "angle": 0, + "content": "The authors thank Dr. Karim A. Emara et al. for collaborating to share the EIDM-IDS source code." + }, + { + "type": "title", + "bbox": [ + 0.084, + 0.418, + 0.182, + 0.433 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.439, + 0.486, + 0.476 + ], + "angle": 0, + "content": "[1] D. G. Chowdhry, R. Verma, M. Mathur, The Evolution of Business in the Cyber Age: Digital Transformation, Threats, and Security, CRC Press, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.478, + 0.484, + 0.514 + ], + "angle": 0, + "content": "[2] B. Kaur, S. Dadkhah, F. Shoeleh, al., Internet of things (iot) security dataset evolution: Challenges and future directions, Internet of Things (2023) 100780." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.516, + 0.485, + 0.551 + ], + "angle": 0, + "content": "[3] S. Hadzovic, S. Mrdovic, M. Radonjic, A path towards an internet of things and artificial intelligence regulatory framework, IEEE Communications Magazine (2023)." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.553, + 0.485, + 0.589 + ], + "angle": 0, + "content": "[4] K. L. M. Ang, J. K. P. Seng, E. Ngharamike, Towards crowdsourcing internet of things (crowd-iot): Architectures, security, and applications, Future Internet 14 (2) (2022) 49." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.591, + 0.485, + 0.614 + ], + "angle": 0, + "content": "[5] M. Ahmid, O. Kazar, A comprehensive review of the internet of things security, Journal of Applied Security Research 18 (3) (2023) 289-305." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.616, + 0.486, + 0.663 + ], + "angle": 0, + "content": "[6] P. Mall, R. Amin, A. K. Das, M. T. Leung, K.-K. R. Choo, Puf-based authentication and key agreement protocols for IoT, wsns, and smart grids: a comprehensive survey, IEEE Internet of Things Journal 9 (11) (2022) 8205-8228." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.665, + 0.486, + 0.715 + ], + "angle": 0, + "content": "[7] A. Lakhlan, M. A. Mohammed, K. H. Abdulkareem, M. M. Jaber, J. Nedoma, R. Martinek, P. Zmij, Delay optimal schemes for internet of things applications in heterogeneous edge cloud computing networks, Sensors 22 (16) (2022) 5937." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.717, + 0.486, + 0.751 + ], + "angle": 0, + "content": "[8] P. Malhotra, Y. Singh, P. Anand, Bangotra, al, Internet of things: Evolution, concerns and security challenges, Sensors 21 (5) (2021) 1809." + }, + { + "type": "ref_text", + "bbox": [ + 0.091, + 0.754, + 0.486, + 0.79 + ], + "angle": 0, + "content": "[9] A. Djenna, S. Harous, D. E. Saidouni, Internet of things meet the internet of threats: New concern cyber security issues of critical cyber infrastructure, Applied Sciences 11 (10) (2021) 4580." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.792, + 0.486, + 0.828 + ], + "angle": 0, + "content": "[10] M. Almiani, A. AbuGhazleh, A. Al-Rahayfeh, S. Atiewi, A. Razaque, Deep recurrent neural network for IoT intrusion detection system, Simulation Modelling Practice and Theory 101 (2020) 102031." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.829, + 0.486, + 0.878 + ], + "angle": 0, + "content": "[11] T. Rajmohan, P. H. Nguyen, N. 
Ferry, Research landscape of patterns and architectures for IoT security: a systematic review, in: 2020 46th Euromicro conference on software engineering and advanced applications (SEAA), IEEE, 2020, pp. 463-470." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.879, + 0.486, + 0.916 + ], + "angle": 0, + "content": "[12] N. Tekin, A. Acar, A. Aris, A. S. Uluagac, V. C. Gungor, Energy consumption of on-device machine learning models for IoT intrusion detection, Internet of Things 21 (2023) 100670." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.439, + 0.486, + 0.916 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.071, + 0.915, + 0.109 + ], + "angle": 0, + "content": "[13] A. Hakiri, A. Gokhale, P. Berthou, D. C. Schmidt, T. Gayraud, Software-defined networking: Challenges and research opportunities for future internet, Computer Networks 75 (2014) 453-471." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.111, + 0.915, + 0.159 + ], + "angle": 0, + "content": "[14] K. H. K. Reddy, A. K. Luhach, V. V. Kumar, S. Pratihar, D. Kumar, D. S. Roy, Towards energy efficient smart city services: A software defined resource management scheme for data centers, Sustainable Computing: Informatics and Systems 35 (2022) 100776." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.161, + 0.915, + 0.196 + ], + "angle": 0, + "content": "[15] A. Montazerolghaem, Software-defined internet of multimedia things: Energy-efficient and load-balanced resource management, IEEE Internet of Things Journal 9 (3) (2021) 2432-2442." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.198, + 0.915, + 0.245 + ], + "angle": 0, + "content": "[16] J. Liu, H. Shen, H. S. Narman, W. Chung, Z. Lin, A survey of mobile crowdsensing techniques: A critical component for the internet of things, ACM Transactions on Cyber-Physical Systems 2 (3) (2018) 1-26." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.248, + 0.915, + 0.284 + ], + "angle": 0, + "content": "[17] B. B. Gupta, M. Quamara, An overview of internet of things (iot): Architectural aspects, challenges, and protocols, Concurrency and Computation: Practice and Experience 32 (21) (2020) e4946." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.286, + 0.914, + 0.322 + ], + "angle": 0, + "content": "[18] A. A. Alsulami, Q. A. Al-Haija, A. Tayeb, Anomaly-based intrusion detection system for IoT networks with improved data engineering (2022)." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.324, + 0.914, + 0.371 + ], + "angle": 0, + "content": "[19] I. Mukherjee, N. K. Sahu, S. K. Sahana, Simulation and modeling for anomaly detection in IoT network using machine learning, International Journal of Wireless Information Networks 30 (2) (2023) 173-189." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.374, + 0.914, + 0.41 + ], + "angle": 0, + "content": "[20] O. Elnakib, E. Shaaban, M. Mahmoud, K. Emara, Eidm: deep learning model for IoT intrusion detection systems, The Journal of Supercomputing (2023) 1-21." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.412, + 0.914, + 0.459 + ], + "angle": 0, + "content": "[21] M. Douiba, S. Benkirane, A. Guezzzaz, M. Azrour, An improved anomaly detection model for IoT security using decision tree and gradient boosting, The Journal of Supercomputing 79 (3) (2023) 3392-3411." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.461, + 0.914, + 0.497 + ], + "angle": 0, + "content": "[22] S. M. Kasongo, Y. 
Sun, A deep learning method with wrapper-based feature extraction for wireless intrusion detection system, Computers & Security 92 (2020) 101752." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.499, + 0.914, + 0.535 + ], + "angle": 0, + "content": "[23] A. Verma, V. Ranga, Machine learning-based intrusion detection systems for IoT applications, Wireless Personal Communications 111 (2020) 2287-2310." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.537, + 0.914, + 0.572 + ], + "angle": 0, + "content": "[24] Y. Otoum, D. Liu, A. Nayak, Dl-ids: a deep learning-based intrusion detection framework for securing IoT, Transactions on Emerging Telecommunications Technologies 33 (3) (2022) e3803." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.575, + 0.914, + 0.61 + ], + "angle": 0, + "content": "[25] T. Gaber, A. El-Ghamry, A. E. Hassanien, Injection attack detection using machine learning for smart IoT applications, Physical Communication 52 (2022) 101685." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.612, + 0.914, + 0.66 + ], + "angle": 0, + "content": "[26] U. Sachdeva, P. R. Vamsi, Analysis of deep learning models for anomaly detection in time series IoT sensor data, in: Proceedings of the 2022 Fourteenth International Conference on Contemporary Computing, 2022, pp. 54-62." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.662, + 0.914, + 0.709 + ], + "angle": 0, + "content": "[27] K. Nimmy, M. Dilraj, S. Sankaran, K. Achuthan, Leveraging power consumption for anomaly detection on IoT devices in smart homes, Journal of Ambient Intelligence and Humanized Computing (2022) 1-12." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.712, + 0.914, + 0.748 + ], + "angle": 0, + "content": "[28] R. Chaganti, W. Suliman, V. Ravi, A. Dua, Deep learning approach for sdn-enabled intrusion detection system in IoT networks, Information 14 (1) (2023) 41." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.75, + 0.914, + 0.797 + ], + "angle": 0, + "content": "[29] M. M. Isa, L. Mhamdi, Hybrid deep autoencoder with random forest in native sdn intrusion detection environment, in: ICC 2022-IEEE International Conference on Communications, IEEE, 2022, pp. 1698-1703." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.8, + 0.914, + 0.861 + ], + "angle": 0, + "content": "[30] P. T. Duy, H. Do Hoang, N. H. Khoa, V.-H. Pham, et al., Fool your enemies: Enable cyber deception and moving target defense for intrusion detection in sdn, in: 2022 21st International Symposium on Communications and Information Technologies (ISCIT), IEEE, 2022, pp. 27-32." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.863, + 0.914, + 0.91 + ], + "angle": 0, + "content": "[31] M. A. Bouke, A. Abdullah, S. H. ALshatebi, M. T. Abdullah, E2ids: An enhanced intelligent intrusion detection system based on decision tree algorithm, Journal of Applied Artificial Intelligence 3 (1) (2022) 1-16." 
+ }, + { + "type": "list", + "bbox": [ + 0.513, + 0.071, + 0.915, + 0.91 + ], + "angle": 0, + "content": null + }, + { + "type": "footer", + "bbox": [ + 0.084, + 0.953, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.819, + 0.953, + 0.914, + 0.967 + ], + "angle": 0, + "content": "Page 19 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "ref_text", + "bbox": [ + 0.083, + 0.071, + 0.486, + 0.12 + ], + "angle": 0, + "content": "[32] L. A. C. Ahakonye, C. I. Nwakanma, J.-M. Lee, D.-S. Kim, Scada intrusion detection scheme exploiting the fusion of modified decision tree and chi-square feature selection, Internet of Things 21 (2023) 100676." + }, + { + "type": "ref_text", + "bbox": [ + 0.083, + 0.122, + 0.486, + 0.159 + ], + "angle": 0, + "content": "[33] M. Hammad, N. Hewahi, W. Elmedany, Mmm-rf: A novel high accuracy multinomial mixture model for network intrusion detection systems, Computers & Security 120 (2022) 102777." + }, + { + "type": "ref_text", + "bbox": [ + 0.083, + 0.16, + 0.485, + 0.209 + ], + "angle": 0, + "content": "[34] K. Albulayhi, Q. Abu Al-Haija, S. A. Alsuhibany, A. A. Jillepalli, M. Ashrafuzzaman, F. T. Sheldon, Iot intrusion detection using machine learning with a novel high performing feature selection method, Applied Sciences 12 (10) (2022) 5015." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.21, + 0.485, + 0.247 + ], + "angle": 0, + "content": "[35] H. Yang, S. Liang, J. Ni, H. Li, X. S. Shen, Secure and efficient km classification for industrial internet of things, IEEE Internet of Things Journal 7 (11) (2020) 10945-10954." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.249, + 0.485, + 0.297 + ], + "angle": 0, + "content": "[36] A. D. Afifaturahman, M. Firmansyah, Perbandingan algorithm k-nearest neighbour (knn) dan naive bayes pada intrusion detection system (ids), Innovation in Research of Informatics (INNOVATICs) 3 (1) (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.298, + 0.485, + 0.371 + ], + "angle": 0, + "content": "[37] F. Z. Belgrana, N. Benamrane, M. A. Hamaida, A. M. Chaabani, A. Taleb-Ahmed, Network intrusion detection system using neural network and condensed nearest neighbors with selection of nsl-kdd influencing features, in: 2020 IEEE International Conference on Internet of Things and Intelligence System (IoTaIS), IEEE, 2021, pp. 23-29." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.373, + 0.485, + 0.422 + ], + "angle": 0, + "content": "[38] Y. Yan, L. Qi, J. Wang, Y. Lin, L. Chen, A network intrusion detection method based on stacked autoencoder and LSTM, in: ICC 2020-2020 IEEE International Conference on Communications (ICC), IEEE, 2020, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.424, + 0.485, + 0.46 + ], + "angle": 0, + "content": "[39] M. D. Hossain, H. Inoue, H. Ochiai, D. Fall, Y. Kadobayashi, Lstmbased intrusion detection system for in-vehicle can bus communications, IEEE Access 8 (2020) 185489-185502." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.461, + 0.485, + 0.498 + ], + "angle": 0, + "content": "[40] A. El-Ghamry, A. Darwish, A. E. Hassanien, An optimized cnn-based intrusion detection system for reducing risks in smart farming, Internet of Things 22 (2023) 100709." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.499, + 0.485, + 0.548 + ], + "angle": 0, + "content": "[41] S. Jamshidi, A. Nikanjam, M. A. Hamdaqa, F. Khomh, Attack detection by using deep learning for cyber-physical system, in: Artificial Intelligence for Cyber-Physical Systems Hardening, Springer, 2022, pp. 155–179." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.549, + 0.485, + 0.586 + ], + "angle": 0, + "content": "[42] P. Sun, P. Liu, Q. Li, C. Liu, X. Lu, R. Hao, J. Chen, Dl-ids: Extracting features using cnn-lstm hybrid network for intrusion detection system, Security and communication networks 2020 (2020) 1–11." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.587, + 0.485, + 0.623 + ], + "angle": 0, + "content": "[43] A. Halbouni, T. S. Gunawan, M. H. Habaebi, M. Halbouni, M. Kartiwi, R. Ahmad, Cnn-lstm: hybrid deep neural network for network intrusion detection system, IEEE Access 10 (2022) 99837-99849." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.624, + 0.485, + 0.66 + ], + "angle": 0, + "content": "[44] D. Stiawan, M. Y. B. Idris, A. M. Bamhdi, R. Budiarto, et al., Cicids-2017 dataset feature analysis with information gain for anomaly detection, IEEE Access 8 (2020) 132911–132921." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.661, + 0.485, + 0.699 + ], + "angle": 0, + "content": "[45] R. Panigrahi, S. Borah, A detailed analysis of cicids2017 dataset for designing intrusion detection systems, International Journal of Engineering & Technology 7 (3.24) (2018) 479-482." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.7, + 0.485, + 0.736 + ], + "angle": 0, + "content": "[46] A. A. Alsulami, Q. Abu Al-Haija, A. Tayeb, A. Alqahtani, An intrusion detection and classification system for IoT traffic with improved data engineering, Applied Sciences 12 (23) (2022) 12336." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.737, + 0.485, + 0.774 + ], + "angle": 0, + "content": "[47] L. Yang, A. Moubayed, I. Hamieh, A. Shami, Tree-based intelligent intrusion detection system in internet of vehicles, in: 2019 IEEE global communications conference (GLOBECOM), IEEE, 2019, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.775, + 0.485, + 0.799 + ], + "angle": 0, + "content": "[48] Great Learning, Label encoding in python, [link], accessed: 2024-03-21 (n.d.)." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.8, + 0.485, + 0.824 + ], + "angle": 0, + "content": "[49] Analytics Vidhya, Overcoming class imbalance using smote techniques, [link], accessed: 2024-03-21 (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.825, + 0.485, + 0.874 + ], + "angle": 0, + "content": "[50] T. N. Sainath, O. Vinyals, A. Senior, H. Sak, Convolutional, long short-term memory, fully connected deep neural networks, in: 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), IEEE, 2015, pp. 4580-4584." + }, + { + "type": "ref_text", + "bbox": [ + 0.084, + 0.875, + 0.485, + 0.924 + ], + "angle": 0, + "content": "[51] L. Muhammad, A. A. Haruna, U. S. Sharif, M. B. Mohammed, Cnn-lstm deep learning based forecasting model for Covid-19 infection cases in nigeria, south africa and botswana, Health and technology 12 (6) (2022) 1259–1276." + }, + { + "type": "list", + "bbox": [ + 0.083, + 0.071, + 0.486, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.071, + 0.914, + 0.121 + ], + "angle": 0, + "content": "[52] L. Alzubaidi, J. Zhang, A. J. Humaidi, A. 
Al-Dujaili, Y. Duan, O. Al-Shamma, J. Santamaría, M. A. Fadhel, M. Al-Amidie, L. Farhan, Review of deep learning: concepts, cnn architectures, challenges, applications, future directions, Journal of big Data 8 (2021) 1-74." + }, + { + "type": "ref_text", + "bbox": [ + 0.513, + 0.122, + 0.914, + 0.159 + ], + "angle": 0, + "content": "[53] G. Najera-Gutierrez, J. A. Ansari, Web Penetration Testing with Kali Linux: Explore the methods and tools of ethical hacking with Kali Linux, Packt Publishing Ltd, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.16, + 0.914, + 0.209 + ], + "angle": 0, + "content": "[54] S. Asadollahi, B. Goswami, M. Sameer, Ryu controller's scalability experiment on software defined networks, in: 2018 IEEE international conference on current trends in advanced computing (ICCTAC), IEEE, 2018, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.21, + 0.914, + 0.247 + ], + "angle": 0, + "content": "[55] K. Kaur, J. Singh, N. S. Ghumman, Mininet as software defined networking testing platform, in: International conference on communication, computing & systems (ICCCS), 2014, pp. 139-42." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.248, + 0.914, + 0.272 + ], + "angle": 0, + "content": "[56] L. St, S. Wold, et al., Analysis of variance (anova), Chemometrics and intelligent laboratory systems 6 (4) (1989) 259-272." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.273, + 0.914, + 0.333 + ], + "angle": 0, + "content": "[57] D. Breitenbacher, I. Homoliak, Y. L. Aung, N. O. Tippenhauer, Y. Elovici, Hades-iot: A practical host-based anomaly detection system for iot devices, in: Proceedings of the 2019 ACM Asia conference on computer and communications security, 2019, pp. 479-484." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.335, + 0.914, + 0.385 + ], + "angle": 0, + "content": "[58] B. Chen, Y. Zhang, G. Iosifidis, M. Liu, Reinforcement learning on computational resource allocation of cloud-based wireless networks, in: 2020 IEEE 6th World Forum on Internet of Things (WF-IoT), IEEE, 2020, pp. 1-6." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.386, + 0.914, + 0.422 + ], + "angle": 0, + "content": "[59] R. D. Corin, A. Costanzo, F. Callegati, D. Siracusa, Methods and techniques for dynamic deployability of software-defined security services, CoRR (2020)." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.424, + 0.718, + 0.435 + ], + "angle": 0, + "content": "[60] A. van de Ven, Powertop, [link]." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.436, + 0.914, + 0.472 + ], + "angle": 0, + "content": "[61] N. F. Syed, Z. Baig, A. Ibrahim, C. Valli, Denial of service attack detection through machine learning for the IoT, Journal of Information and Telecommunication 4 (4) (2020) 482-503." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.474, + 0.914, + 0.509 + ], + "angle": 0, + "content": "[62] K. Sonar, H. Upadhyay, A survey: Ddos attack on internet of things, International Journal of Engineering Research and Development 10 (11) (2014) 58-63." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.511, + 0.914, + 0.56 + ], + "angle": 0, + "content": "[63] M. M. Raikar, S. Meena, Ssh brute force attack mitigation in internet of things (iot) network: An edge device security measure, in: 2021 2nd international conference on secure cyber computing and communications (ICSCCC), IEEE, 2021, pp. 72-77." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.561, + 0.914, + 0.611 + ], + "angle": 0, + "content": "[64] Q. A. Al-Haija, E. Saleh, M. Alnabhan, Detecting port scan attacks using logistic regression, in: 2021 4th International symposium on advanced electrical and communication technologies (ISAECT), IEEE, 2021, pp. 1-5." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.611, + 0.914, + 0.648 + ], + "angle": 0, + "content": "[65] Z. Campbell, A. Bray, A. Ritz, A. Groce, Differentially private anova testing, in: 2018 1st International Conference on Data Intelligence and Security (ICDIS), IEEE, 2018, pp. 281-285." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.649, + 0.914, + 0.673 + ], + "angle": 0, + "content": "[66] H. Wei, X. Song, Smooth tests for normality in anova, arXiv preprint arXiv:2110.04849 (2021)." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.674, + 0.859, + 0.686 + ], + "angle": 0, + "content": "[67] E. Frimpong, A performance study of the snort ids (2008)." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.686, + 0.914, + 0.749 + ], + "angle": 0, + "content": "[68] D. Fadhilah, M. I. Marzuki, Performance analysis of ids snort and ids suricata with many-core processor in virtual machines against dos/ddos attacks, in: 2020 2nd International Conference on Broadband Communications, Wireless Sensors and Powering (BCWSP), IEEE, 2020, pp. 157-162." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.749, + 0.914, + 0.786 + ], + "angle": 0, + "content": "[69] M. Hawedi, C. Talhi, H. Boucheneb, Multi-tenant intrusion detection system for public cloud (mtids), The Journal of Supercomputing 74 (2018) 5199–5230." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.788, + 0.914, + 0.824 + ], + "angle": 0, + "content": "[70] S. M. Raza, J. Jeong, M. Kim, B. Kang, H. Choo, Empirical performance and energy consumption evaluation of container solutions on resource constrained IoT gateways, Sensors 21 (4) (2021) 1378." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.825, + 0.914, + 0.861 + ], + "angle": 0, + "content": "[71] W. Park, S. Ahn, Performance comparison and detection analysis in snort and suricata environment, Wireless Personal Communications 94 (2017) 241-252." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.862, + 0.914, + 0.899 + ], + "angle": 0, + "content": "[72] E. Ozturk Kiyak, B. Ghasemkhani, D. Birant, High-level k-nearest neighbors (hlknn): A supervised machine learning model for classification analysis, Electronics 12 (18) (2023) 3828." + }, + { + "type": "ref_text", + "bbox": [ + 0.514, + 0.9, + 0.914, + 0.924 + ], + "angle": 0, + "content": "[73] E. Altulaihan, M. A. Almaiah, A. Aljughaiman, Anomaly detection ids for detecting dos attacks in IoT networks based on machine learning" + }, + { + "type": "list", + "bbox": [ + 0.513, + 0.071, + 0.914, + 0.924 + ], + "angle": 0, + "content": null + }, + { + "type": "footer", + "bbox": [ + 0.084, + 0.953, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.819, + 0.953, + 0.913, + 0.967 + ], + "angle": 0, + "content": "Page 20 of 21" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.285, + 0.043, + 0.713, + 0.058 + ], + "angle": 0, + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.072, + 0.325, + 0.084 + ], + "angle": 0, + "content": "algorithms, Sensors 24 (2) (2024) 713." 
+ }, + { + "type": "text", + "bbox": [ + 0.084, + 0.085, + 0.486, + 0.109 + ], + "angle": 0, + "content": "[74] U. Nations, United nations goals: Sustainable development, [link], accessed: September 3, 2024 (2023)." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.11, + 0.486, + 0.146 + ], + "angle": 0, + "content": "[75] F. Khomh, S. A. Abtahizadeh, Understanding the impact of cloud patterns on performance and energy consumption, Journal of Systems and Software 141 (2018) 151-170." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.147, + 0.485, + 0.184 + ], + "angle": 0, + "content": "[76] C. Wohlin, P. Runeson, M. Höst, M. C. Ohlsson, B. Regnell, A. Wesslén, Experimentation in software engineering, Springer Science & Business Media, 2012." + }, + { + "type": "text", + "bbox": [ + 0.084, + 0.185, + 0.338, + 0.197 + ], + "angle": 0, + "content": "[77] S. Jamshidi, Replication packages, [link]." + }, + { + "type": "list", + "bbox": [ + 0.084, + 0.085, + 0.486, + 0.197 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.512, + 0.071, + 0.602, + 0.09 + ], + "angle": 0, + "content": "Appendix" + }, + { + "type": "table_caption", + "bbox": [ + 0.558, + 0.104, + 0.868, + 0.119 + ], + "angle": 0, + "content": "Table 13: Abbreviations used in this research." + }, + { + "type": "table", + "bbox": [ + 0.513, + 0.13, + 0.952, + 0.662 + ], + "angle": 0, + "content": "
AbbreviationMeaning
AIArtificial Intelligence
ANOVAAnalysis of Variance
ANNArtificial Neural Network
BTBoosting Tree
CPUCentral Processing Unit
DAEDeep Autoencoder
DDoSDistributed Denial-of-Service
DLDeep Learning
DoSDenial-of-Service
DTDecision Tree
GPUGraphics Processing Unit
IDSIntrusion Detection System
IoTInternet of Things
KNNK-Nearest Neighbor
LRLogistic Regression
LSTMLong Short-Term Memory
CNNConvolutional Neural Network
MCUMicrocontroller Unit
MITMMan-in-the-Middle
MLMachine Learning
MTDMoving Target Defense
NBNaïve Bayes
R2LRemote to Local
RFRandom Forest
RNNRecurrent Neural Network
SDNSoftware-Defined Networking
SDPNStacked-Deep Polynomial Network
SMOSpider Monkey Optimization
SMOTESynthetic Minority Oversampling Technique
SNNSpiking Neural Network
SVMSupport Vector Machine
U2RUser to Root
WFEUWrapper Feature Extraction Unit
WSNWireless Sensor Network
" + }, + { + "type": "footer", + "bbox": [ + 0.084, + 0.953, + 0.416, + 0.967 + ], + "angle": 0, + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + }, + { + "type": "page_number", + "bbox": [ + 0.819, + 0.953, + 0.913, + 0.967 + ], + "angle": 0, + "content": "Page 21 of 21" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_origin.pdf b/data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a72db9fa9b2018a5dce2f53eca006f87e41b403d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/700831eb-1538-44f2-9e40-5bbd9b316f16_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:275315c214ba75139ab36605daa99d69c761d094ab91f106c875da3e920d71e1 +size 746094 diff --git a/data/2025/2504_09xxx/2504.09634/full.md b/data/2025/2504_09xxx/2504.09634/full.md new file mode 100644 index 0000000000000000000000000000000000000000..e6ce25bfb8616c80319172cc374dedce20356461 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/full.md @@ -0,0 +1,535 @@ +# Evaluating Machine Learning-Driven Intrusion Detection Systems in IoT: Performance and Energy Consumption + +Saeid Jamshidi, Kawser Wazed Nafi, Amin Nikanjam, Foutse Khomh + +SWAT, Polytechnique, Montréal, H3T 1J4, Quebec, Canada + +# ARTICLEINFO + +Keywords: + +Machine Learning, Intrusion Detection System, Energy Consumption, Software-Defined Networking, SDN-IoT + +# ABSTRACT + +In the landscape of network security, the integration of Machine Learning (ML)-based Intrusion Detection System (IDS) represents a significant leap forward, especially in the domain of the Internet of Things (IoT) and Software-Defined Networking (SDN). Such ML-based IDS are crucial for improving security infrastructures, and their importance is increasingly pronounced in IoT systems. However, despite the rapid advancement of ML-based IDS, there remains a gap in understanding their impact on critical performance metrics (e.g., CPU load, energy consumption, and CPU usage) in resource-constrained IoT devices. This becomes especially crucial in scenarios involving real-time cyber threats that challenge IoT devices in a public/private network. + +To address this gap, this article presents an empirical study that evaluates the impact of state-of-the-art ML-based IDSs on performance metrics such as CPU usage, energy consumption, and CPU load in the absence and presence of real-time cyber threats, with a specific focus on their deployment at the edge of IoT infrastructures. We also incorporate SDN to evaluate the comparative performance of ML-based IDSs with and without SDN. To do so, we focus on the impact of both SDN's centralized control and dynamic resource management on the performance metrics of an IoT system. Finally, we analyze our findings using statistical analysis using the Analysis of Variance (ANOVA) analysis. Our findings demonstrate that traditional ML-based IDS, when implemented at the edge gateway with and without SDN architecture, significantly affects performance metrics against cyber threats compared to DL-based ones. Also, we observed substantial increases in energy consumption, CPU usage, and CPU load during real-time cyber threat scenarios at the edge, underscoring the resource-intensive nature of these systems. This research fills the existing knowledge void and delivers essential insights into the operational dynamics of ML-based IDS at edge gateway in IoT systems. 
+ +# 1. Introduction + +The rapid expansion of the Internet of Things (IoT) has ushered in an era where data flows seamlessly across various sectors, driving profound changes in how devices interact [1][2]. This intricate IoT ecosystem, composed of countless devices, sensors, and intelligent nodes, has fundamentally reshaped how we think about device communication, significantly minimizing the need for human involvement [3]. The integration of Software-Defined Networking (SDN) within the IoT landscape represents a significant step forward, creating a unified IoT-SDN framework that offers centralized control, improved network management, and stronger security measures [4][5]. + +The rapid expansion of IoT, driven by the interconnection of millions of devices via Wireless Sensor Networks (WSNs), presents significant challenges [6]. These challenges stem mainly from these devices' limited memory, power, and battery life, highlighting the need for optimized computing and advanced data analysis techniques [7]. Deploying SDN within this framework aims to overcome these obstacles by offering a streamlined, secure network infrastructure that facilitates effective resource allocation and enhanced threat + +management. + +Given the widespread security vulnerabilities in IoT networks, such as service disruptions and unauthorized access, the importance of Machine Learning (ML)-based Intrusion Detection Systems (IDS) has grown [8]. ML-based IDS are crucial for protecting network integrity due to their ability to adapt dynamically and effectively identify threats [9][10] [11]. + +However, despite advancements in developing ML-based IDS for IoT, several critical gaps remain, as highlighted by Tekin et al. [12]. While previous research has examined ML-based IDS's performance in controlled, static testbed environments, there is a significant gap in understanding how these systems operate under the dynamic conditions of real-time cyber threats, especially when IoT is integrated with SDN. Moreover, while the potential of SDN to significantly enhance resource management in IoT systems is widely acknowledged [13][14][15], there is a lack of empirical evidence on how SDN interacts with ML-based IDS during cyber threats. + +In this study, we set two primary objectives designed to deepen our understanding of network performance metrics in IoT. Firstly, we assess the impact of deploying ML-based IDS at edge gateway, mainly focusing on ML-based IDS performance metrics under real-time cyber threats. Secondly, we explore the impact of integrating SDN with our testbed, again at edge gateway, to evaluate its influence on performance metrics under similar cyber threats. The rationale behind incorporating SDN into our testbed is its + +potential to improve resource management in IoT systems significantly [16][17]. We conduct a comparative analysis of the performance of seven state-of-the-art ML-based IDSs in two distinct setups: firstly, at the edge gateway, and secondly, in a similar setup augmented with SDN integration at the edge gateway, all under real-time cyber threats. This analysis is designed to elucidate the impact of SDN on performance metrics and resource management in IoT systems, especially highlighting how SDN integration can optimize the operational efficiency and resilience of IoT networks against the backdrop of evolving cyber threats. 
To summarize, this paper makes the following contributions: + +- Assessing performance metrics of ML-based IDS in IoT systems under real-time cyber threats: Our investigation revealed the significant impact of seven ML-based IDS on the performance at the edge, specifically measuring CPU usage, CPU load, and energy consumption amidst cyber threats. Utilizing ANOVA, we clarify the operational consequences of deploying these sophisticated IDSs on the edge. +- Evaluating the impact of ML-based IDS at edge integrated with SDN: we evaluated the performance metrics of seven ML-based IDS at the edge gateway system integrated with SDN. Utilizing ANOVA, we clarify the impact of the integrated SDN with IoT on deploying these sophisticated IDS under real-time cyber threats. +- Proposing a plugin-based ML-based IDS test suite: This test suite comes with a group of available datasets and available ML-based IDSs and allows the users to define their own IoT and SDN applications and test their ML-based IDSs and models in terms of detection accuracy and performance metrics. Researchers can efficiently perform comparative analyses for their algorithms and models with other available algorithms and models. The test suite is publicly available (section 8) for researchers and practitioners to reuse. + +The remainder of this paper is organized as follows: Section 2 discusses the review of our research literature. Section 3 discusses the necessary background knowledge. In Section 4, we describe the experimental design, the Research Questions (RQs), and the metrics of the experiments. Section 5 explains our results and findings. Section 8 discusses threats to the validity of our study. Finally, Section 9 concludes the paper and outlines future work. + +# 2. Related Works + +Understanding the performance trade-offs of ML-based IDS in IoT, especially in resource-constrained edge gateways, remains an open challenge. While numerous studies, as mentioned in the previous section, have focused on detection accuracy, limited research has analyzed their real-time computational impact. In particular, there is a significant gap in understanding how ML-based IDS operate under real-time + +cyber threats, especially when integrated with SDN. This section reviews prior works on ML-based IDS in IoT and SDN, examining their strengths and limitations and focusing on ML models and energy consumption concerns. + +# 2.1. IoT Intrusion Detection + +Alsulami et al. [18] proposed a new ML model to identify and categorize network activity in IoT systems. Their research aimed to classify network traffic into distinct categories, including normal behavior and various types of attacks (e.g., Mirai, Denial-of-Service (DoS), Scan, and Man-in-the-Middle (MITM)). The study involved testing several supervised learning models on the customized IoTID20 dataset, including Spiking Neural Networks (SNNs), DT, Boosting Trees (BT), Support Vector Machines (SVM), and KNN. These models, enhanced through deep feature engineering, effectively identified and classified network anomalies. + +Mukherjee et al. [19] conducted an in-depth investigation into the predictive capabilities of supervised learning models (e.g., Logistic Regression (LR), Naïve Bayes (NB), DT, RF, and Artificial Neural Network (ANN)) for anomaly detection. Their study utilized a dataset comprising 350,000 data points. 
The research compared these models against established state-of-the-art techniques, including BIRCH clustering and K-Means, and evaluated their performance in different scenarios. This included an analysis using the complete dataset and a separate evaluation after removing binary data points in the 'value' feature. The models demonstrated high precision in both scenarios, underscoring their efficacy in practical anomaly forecasting and enhancing security measures against potential risks. + +Elnakib et al. [20] proposed the Enhanced Intrusion Detection Deep Learning Multi-class Classification Model (EIDM), a sophisticated Deep Learning (DL) model designed to enhance security in the IoT context. This model is adept at accurately categorizing 15 distinct traffic characteristics, encompassing a range of 14 discrete attack types. The performance of EIDM was evaluated against four other contemporary models, focusing on classification accuracy and efficiency. The increased precision of EIDM highlights its promise as a powerful solution for safeguarding IoT networks against a wide range of attacks. + +Douiba et al. [21] proposed an innovative IDS to enhance IoT device security. Their approach utilized gradient boosting and DT in the Catboost framework. The model's performance was rigorously assessed on several datasets, including NSL-KDD, IoT-23, BoT-IoT, and Edge-IIoT, with optimization achieved through GPU acceleration. The IDS distinguished itself with its ability to detect anomalies in real-time and its computing efficiency, demonstrating high accuracy, recall, and precision metrics, around $99.9\%$ on a record detection and computation time. + +Kasongo et al. [22] presented a research endeavor in which they proposed a Feed-Forward Deep Neural Network (FFDNN) IDS, enhanced by the inclusion of a Wrapper Feature Extraction Unit (WFEU) utilizing the Extra Trees + +algorithm. The WFEU-FFDNN was evaluated for its performance on several datasets, including UNSW-NB15 and AWID, and compared with traditional ML methods. The system demonstrated high classification accuracies in binary and multiclass classifications across these datasets, significantly outperforming in scenarios involving the AWID dataset. The enhanced precision of the WFEU-FFDNN model emphasizes its efficacy in real-time anomaly detection and computing efficiency. + +In addition to all of the works stated above, Verma et al. [23] examined ML algorithms in the context of augmenting security measures in the IoT. The researchers compared classifiers using benchmark datasets (e.g., CIDDS-001, UNSW-NB15, and NSL-KDD). This analysis was supported by statistical tests, namely the Friedman and Nemenyi tests. The researchers also evaluated the reaction times on the Raspberry Pi platform, showcasing the adaptability and efficiency of the classifiers in IoT scenarios, hence emphasizing their practical relevance. + +Otoum et al. [24] presented a scholarly investigation in which they propose a DL-powered intrusion detection system (DL-based IDS) to effectively address challenges associated with feature learning and dataset management. The DL-based IDS developed by the researchers integrates the Spider Monkey Optimization(SMO) algorithm with the stacked-deep polynomial network (SDPN) to enhance threat identification. The system can detect various abnormalities, including DoS, User to Root attacks (U2R), probing, and Root-to-local attacks (R2L). 
The DL-based IDS was evaluated using the NSL-KDD dataset and exhibited outstanding performance metrics, showcasing its efficacy in various aspects of threat detection. + +Gaber et al. [25] highlight securing IoT systems, especially in complex environments ( e.g., smart cities). The authors introduced a feature selection methodology that combines constant removal and recursive feature elimination strategies. They utilized a DT classifier with a subset of 8 characteristics, assessed on the AWID dataset using various ML classifiers. In contrast to existing methods, their approach exhibited exceptional performance, achieving high accuracy, precision, and F1 score rates. These results underscore the potential of their methodology in the domain of IoT-IDS. + +Sachdeva et al. [26] investigate the issue of fortifying cybersecurity in IoT networks to mitigate the impact of distributed denial-of-service (DDoS) attacks. The authors put out an innovative approach for data pre-processing, which involves the integration of ML and DL classifiers. The class imbalances in the BOT-IoT and TON-IoT datasets from UNSW Australia are mitigated using several Synthetic Minority Oversampling Technique (SMOTE) variants. The hybrid methodology employed in this study, which integrates many algorithms, demonstrates the promising prospects for efficient detection of DDoS attacks in IoT networks. + +The related works discussed above show that the most ML-based IDS developed and re-used by researchers are DT, KNN, RF, LSTM, CNN, and a hybrid model of CNN and + +LSTM. In addition, EIDM is the most recent work that has overcome the limitations of the previous ML models. That is why we proceed with all these six ML-based IDS to carry out our study in this paper. + +# 2.2. Energy consumption in IDS + +Only a tiny amount of research has been done so far to determine the energy consumption in IDS. Among them, Tekin et al. [12] investigated the topic of IDS in the context of the IoT, with a specific focus on the energy consumption aspect in devices with limitations. The authors assessed various ML paradigms in the context of cloud computing, edge computing, and IoT devices. They specifically emphasize the promising capabilities of TinyML for microcontroller units (MCUs). DT algorithm demonstrates in terms of training, inference, and power efficiency. Although Naive Bayes (NB) has superior training speed, it exhibits a minor accuracy trade-off requirements of the KNN algorithm increase proportionally with the quantity of the dataset, hence diminishing its suitability for deployment in IoT systems. Both DT and RF exhibit low power consumption and high accuracy. However, it is essential to consider that RF's longer execution time represents a trade-off. The research findings also elucidate the advantages and constraints of cloud-based ML, underscoring the significance of algorithm choice in practical implementations. + +Nimmy et al. [27] utilize the energy consumption patterns of IoT devices to identify irregularities in smart home environments. They developed a prototype of a smart camera based on Raspberry Pi to gather power traces during regular operations and simulated DDoS attacks. This approach emphasizes the importance of energy consumption as a crucial indicator of aberrant behaviors. The deep feedforward neural network used in their study demon- strates exceptional performance in identifying anomalies, as evidenced by rigorous evaluations of ML models. 
This indicates its potential to enhance the security of smart homes significantly. + +# 2.3. IoT Intrusion Detection in SDN + +Chaganti et al. [28] present a sophisticated IDS for IoT networks. This system leverages SDN and specifically emphasizes the utilization of DL techniques. The research is for its utilization of LSTM networks, a Recurrent Neural Network (RNN) type renowned for its efficacy in handling time series data, which is critical in detecting network threats. The authors' principal contribution is utilizing an LSTM model, which they employ to discern network attacks. To evaluate the efficacy of their approach, the authors conduct a comparative analysis with alternative architectures(e.g., SVM). The experimental findings present solid evidence that highlights the improved efficacy of the LSTM model in accurately categorizing various network attacks. The LSTM model demonstrated exceptional accuracy and efficiency in detecting attack patterns, surpassing conventional ML models in precision and recall metrics. + +M. M. Isa et al. [29] present the DAERF model in their research, an innovative IDS for SDN. This model combines + +a Deep Autoencoder (DAE) with an RF algorithm, creating a unique approach. The DAE excels in feature extraction and data dimensionality reduction. At the same time, the RF approach, known for using an ensemble of DTs, shows significant accuracy and robustness in classification tasks. The DAERF model was evaluated in a simulated SDN using commonly used datasets, demonstrating a high efficacy level. The integration of DL and ML in the DAERF model represents a novel approach that effectively identifies and categorizes network intrusions, enhancing the security of SDN systems and ensuring their capability to handle real-time applications with scalability and adaptability. + +Phan The Duy et al. [30] presented 'FoolYE,' an innovative IDS designed specifically for SDN systems. The system combines cyber deception techniques with Moving Target Defense (MTD) methodologies. The core of this methodology lies in its ability to create a dynamic and misleading network environment, making it challenging for malicious actors to identify and exploit genuine resources. A key innovation is deep transfer learning-based IDS, which employs advanced DL models (e.g., ResNet50 and DenseNet161), originally designed for image recognition. These models have been adapted using deep transfer learning techniques to analyze network traffic for ML-based IDS, demonstrating the versatility and efficacy of DL in cybersecurity. The study involved experiments in simulated SDN systems, where the performance of the IDS was thoroughly examined, showing its high capability in accurately detecting a wide range of network intrusions. + +Despite advancements in ML-based IDS for IoT, a significant gap remains in understanding their real-time computational impact, especially in energy consumption, CPU load, and CPU usage at the edge gateway. This gap is further compounded by the lack of empirical studies evaluating the effectiveness and efficiency of ML-based IDS in real-world, resource-constrained edge gateway, especially when integrated with SDN during cyber threats. To address these shortcomings, our study provides a comprehensive empirical analysis of ML-based IDS, focusing on their performance trade-offs in SDN-enabled and non-SDN edge gateways. 
Specifically, we assess how different ML-based IDS models impact system resources under real-time cyber threats, offering critical insights into their feasibility for deployment in IoT networks. + +# 3. Background + +This section dives into the underlying premise of the research's baselines. + +Decision Tree (DT): In the field of IDS, DT is a key ML method for analyzing network data. They use trees, e.g., models, to break down network features into binary decisions, evaluating network attributes at each node to identify effective splits. This creates a rule-based hierarchy that excels at spotting differences between normal and suspicious network activities. DTs are valued for their clarity and ease of interpretation, playing a vital in improving cybersecurity by identifying unusual or unauthorized actions + +[31] [32]. + +Random Forest (RF): The algorithm is highly valued in IDS for its precision in classifying network data. Utilizing RF, an ML algorithm, it creates a group of DT to assess various network attributes, effectively distinguishing between normal and malicious activities. RF excels in managing large datasets, balancing IDS data disparities, and minimizing overfitting, making IoT and network security crucial. It achieves accurate detection of unusual network behaviors [33] [34]. + +K-Nearest Neighbor (KNN): The KNN algorithm is a key IDS tool known for its effective similarity-based classification. It compares network traffic with existing labeled data using distance metrics to classify new instances, with 'k' indicating the number of neighbors considered. This method is crucial for identifying normal versus abnormal network activities, offering a simple yet versatile solution for real-time IDS. KNN excels in both binary and multiclass problems, providing quick, reliable categorizations crucial for responding to threats in dynamic networks [35] [36] [37]. Long short-term memory (LSTM): LSTM networks, a type of recurrent neural network, are highly effective in analyzing sequential data for IDS. Their unique memory cells excel at identifying complex patterns in network traffic, making them adept at spotting advanced threats that traditional methods may miss. LSTMs are especially valuable for maintaining context over data sequences, which is crucial for distinguishing between normal and malicious network activities. Their application in IDS significantly boosts cybersecurity, especially in dynamic and IoT environments, by adapting to new threats and efficiently handling varying data lengths, offering a robust solution to modern cybersecurity challenges [38] [39]. + +Convolutional Neural Network(CNN): CNNs provide a resilient DL methodology for IDS. CNNs are widely recognized for their ability to independently acquire hierarchical features from network traffic. This is achieved through convolutional, pooling, and fully connected layers, which enable the discernment of spatial patterns in the traffic data. This capacity facilitates the recognition of both well-established and new threats. CNN in IDS is considered crucial in enhancing cybersecurity defenses against a wide range of cyber threats due to their capacity to scale effectively and efficiently handle real-time data [40] [41]. + +Hybrid model of LSTM and CNN: The integration of LSTM and CNN models into IDS significantly boosts network security by combining the spatial analysis capabilities of CNNs with the temporal pattern recognition of LSTMs. 
This hybrid approach detects complex cyber threats by analyzing network traffic data in both spatial and temporal dimensions. CNNs effectively identify security breaches through local pattern recognition, while LSTMs track the sequence of network events over time, offering a detailed understanding of potential threats. This fusion results in more accurate and efficient detection of sophisticated, multistage attacks, reducing false positives and adapting to new threats, thereby enhancing overall anomaly detection and + +maintaining network integrity without excessive alerts [42] [43]. + +EIDM: The EIDM is a cutting-edge IDS approach expertly handling a wide range of network events. Its design combines convolutional and dense layers to tackle the challenges of class diversity and data imbalance. The model begins with a 120-node dense layer, followed by an 80-neuron convolutional layer with a kernel size of 20 to better distinguish between similar network activities. It also features a Maxpooling layer for enhanced feature extraction and a dropout layer to avoid overfitting. EIDM can classify 15 network behaviors through six dense layers, using 'relu' activation and SGD and Adam optimizers for optimal accuracy and efficiency. According to [20], EIDM's unique structure and optimization techniques make it a standout solution for improving network IDS. + +# 4. Study design + +This section describes our methodology to evaluate the impact of specific ML-based IDSs using selected performance metrics. We first mention our Research Questions (RQs), followed by an explanation of the experimental design and the metrics used to evaluate the impact of the ML-based IDS. + +# 4.1. Research questions(RQs) + +Our research aims to address the following RQs: + +- RQ1: How do ML-based IDSs impact CPU usage, CPU load, and energy consumption at the edge gateway without SDN during real-time cyber threats? + +This RQ examines the impact of ML-based IDSs on crucial performance metrics, specifically CPU usage, CPU load, and energy consumption, at edge gateway not integrated with SDN. It focuses on analyzing the performance of seven state-of-the-art ML-based IDSs and their impacts on these key metrics in the face of diverse cyber threats. + +- RQ2: What are the differences in CPU usage, CPU load, and energy consumption impacts of ML-based IDS at the edge gateway with SDN integration during real-time cyber threats? + +This RQ explores how ML-based IDSs influence CPU usage, CPU load, and energy consumption at the edge gateway integrated with SDN. It involves analyzing the impacts of various ML-based IDSs on these essential performance metrics under various cyber threats. + +# 4.2. DataSet + +In our study, we used the CICIDS2017 data set [44], a highly regarded resource organized by the Canadian Institute for Cybersecurity. This dataset is recognized as one of the gold standards in cybersecurity research, capturing a broad spectrum of benign network activities and the latest cyberattacks [45]. CICIDS2017 is designed to simulate + +Table 1 Distribution of labeled IoT-SDN attacks in the dataset + +
| IoT Attack Labels | No. of labeled entries |
|---|---|
| BENIGN | 2271320 |
| DoS Hulk | 230124 |
| Port Scan | 158804 |
| DDoS | 128025 |
| DoS GoldenEye | 10293 |
| FTP-Patator | 7935 |
| SSH-Patator | 5897 |
| DoS slowloris | 5796 |
| DoS Slowhttptest | 5499 |
| Bot | 1956 |
| Web Attack & Brute Force | 1507 |
| Web Attack & XSS | 652 |
| Infiltration | 36 |
| Web Attack & SQL Injection | 21 |
| Heartbleed | 11 |
+ +real-world network environments, making it an essential resource for researchers to test and validate advanced IDS thoroughly. The breadth and diversity of the asset highlight its importance, making it necessary for those aiming to strengthen network security paradigms. + +# 4.3. The ML-based IDS + +Numerous ML-based IDS have been developed by researchers [12] [22] [25] [46]. However, we had a significant challenge in reviewing these publications and selecting some for our study. Most did not make their solutions' applications or source code publicly available. This lack of transparency hinders the ability to experiment with these works in real IoT devices. This omission complicates, and may even prevent, the objective comparison of the proposed solutions. Consequently, to initiate our study, it became necessary to independently implement all ML-based IDS that have been previously utilized, except the ML-based IDS proposed by [20], which shared their code ML-based IDS available to researchers. In this section, we explore the implementation process of seven ML-based IDSs that we have developed: DT, KNN, RF, LSTM, CNN, and a hybrid model of LSTM and CNN. Table 3 presents a comparative analysis of the performance metrics of ML-based IDS. + +# 4.3.1. DT, KNN, RF + +We have developed and deployed DT-based IDS, RF-based IDS, and KNN-based IDS [47], each specifically designed to improve security policy. The foundation of these models is a preprocessing technique applied to the selected CICIDS 2017 dataset. The dataset features various simulated cyber-attack scenarios alongside standard traffic data. It encompasses multiple numerical attributes, including but not limited to packet sizes, flow durations, and bytes per flow, which are critical for analyzing network behavior and detecting anomalies. We applied min-max normalization as our initial preprocessing step to ensure uniformity across these diverse numerical attributes and + +Table 6 Comparison of structure and accuracy of different Neural Network models in IDS for IoT-SDN network + +
| Model | LSTM | LSTM+CNN | CNN | EIDM |
|---|---|---|---|---|
| Dataset | CICIDS2017 | CICIDS2017 | CICIDS2017 | CICIDS2017 |
| Categories | 15 | 15 | 15 | 15 |
| Layers | 10 | 11 | 8 | 12 |
| Parameters | 5638612795349748735 | | | |
| Structure details | Dense (64)<br>Dense (128)<br>LSTM (128)<br>LSTM (256)<br>Dense (128)<br>Dense (48)<br>Dense (15) | Dense (64)<br>Conv1D (64, 10)<br>Conv1D (64, 10)<br>MaxPooling1D (2)<br>LSTM (128)<br>LSTM (64)<br>Dense (64)<br>Dense (15) | Conv1D (16, 30)<br>Conv1D (16, 30)<br>MaxPooling1D (2)<br>Flatten()<br>Dense (32)<br>Dense (15) | Dense (120)<br>Conv1D (80, 20)<br>MaxPooling1D (2)<br>Dense (120)<br>Dense (100)<br>Dense (80)<br>Dense (60)<br>Dense (60)<br>Dense (40)<br>Dense (15) |
| Training Accuracy (%) | 97.72 | 98.77 | 97.92 | 99.57 |
| Testing Accuracy (%) | 93.86 | 95.75 | 94.74 | 99.56 |
+ +mitigate scale discrepancies. Missing values were imputed to preserve the integrity of the data. The LabelEncoder[48] was utilized to convert labels into a format suitable for ML techniques. An essential aspect of our methodology is to divide the selected dataset into training and testing subsets. For the first RQ, we adopted $80\%$ training and $20\%$ testing, aligning with standard practices in ML model development. This adjustment was made to accommodate the different requirements of each research phase. As shown in Table 1, the dataset has five classes (Benign, DDoS, DoS, Brute force, and Port scan) with significantly more entries than the remaining ten classes, which contain fewer samples. SMOTE [49] with auto-sampling was employed to address the class imbalance issue in the dataset. This technique effectively augmented the representation of underrepresented classes, leading to a more balanced dataset for training purposes. + +# 4.3.2. CNN + +In our research, we deployed a CNN-based IDS tailored for our experimental testbed. The configuration details of the CNN model, including its layers, parameters, and architecture specifics, are outlined in Table 2. + +# 4.3.3. LSTM + +In our investigation, we implemented an LSTM-based IDS specifically for our testbeds. The detailed architecture and parameters of the LSTM model, crucial for its operation in our IDS, are thoroughly presented in Table 2. + +# 4.3.4. Hybrid model of LSTM and CNN + +In our exploration, we implemented a hybrid LSTM and CNN architectures model to create an advanced IDS tailored to our experimental setup. This architecture has already been tested in various scenarios [50][51][43]. The intricate configuration of this hybrid LSTM and CNN model, which leverages the strengths of both LSTM and CNN to enhance + +detection capabilities, is detailed in Table 2. + +The goal of using the hybridization of LSTM and CNN is twofold. First, CNN can drop the non-impactful features and select only the impactful ones (feature engineering). At the same time, it helps to learn the features in a Spatial Hierarchical manner [52]. Second, from our dataset, we got 77 features. As it is unknown which features are impactful from the given features, we applied a 2 1-dimensional CNN layer followed by a max-pooling layer to find the impactful features by learning the 10 nearby features together (kernel size 10). This helps us to create new feature representations where the impactful ones are sustained. Later, we fed these newly derived features directly to 2 LSTM layers. This step helps to learn the spatial and temporal features from CNN, resulting in feature representations presented in context and awarded. Finally, we applied 2 Dense layers to regress the feature representations generated from previous CNN and LSTM layers into 15 classes. This process helps us learn the input features more deeply and increase the classification accuracy. + +# 4.4. Experimental Design + +To address RQ1, we designed a testbed incorporating two Raspberry Pi 4 Model B units as edge gateways. Each unit is equipped with 8GB of RAM and a 1.5GHz 64-bit quad-core CPU, providing a realistic environment for evaluating the computational impact of ML-based IDS at the edge gateway. Our study evaluates the performance of seven ML-based IDS models: DT, KNN, RF, LSTM, CNN, EIDM, and a hybrid of LSTM and CNN model, selected for their established effectiveness in cybersecurity. 
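To make the preprocessing steps and the hybrid architecture described above concrete, the following is a minimal sketch assuming scikit-learn, imbalanced-learn, and Keras (TensorFlow); the hypothetical `load_cicids2017` helper, the column name `Label`, and the training hyperparameters are illustrative placeholders rather than the exact settings of our released implementation.

```python
import pandas as pd
from sklearn.preprocessing import MinMaxScaler, LabelEncoder
from sklearn.model_selection import train_test_split
from imblearn.over_sampling import SMOTE
from tensorflow.keras import layers, models

def load_cicids2017(path):
    # Hypothetical loader: the merged CICIDS2017 CSV files read into one DataFrame.
    return pd.read_csv(path)

df = load_cicids2017("cicids2017.csv")
X = df.drop(columns=["Label"]).fillna(0)          # impute missing values
y = LabelEncoder().fit_transform(df["Label"])     # encode the 15 class labels
X = MinMaxScaler().fit_transform(X)               # min-max normalization

# 80/20 split, then SMOTE with automatic sampling to rebalance minority attack classes.
X_tr, X_te, y_tr, y_te = train_test_split(X, y, test_size=0.2, stratify=y, random_state=42)
X_tr, y_tr = SMOTE(sampling_strategy="auto", random_state=42).fit_resample(X_tr, y_tr)

# Hybrid CNN+LSTM IDS (simplified from Table 6): Conv1D layers with kernel size 10
# derive compact feature representations, LSTM layers add temporal context, and
# Dense layers map the result onto the 15 traffic classes.
n_features = X_tr.shape[1]
model = models.Sequential([
    layers.Input(shape=(n_features, 1)),
    layers.Conv1D(64, 10, activation="relu"),
    layers.Conv1D(64, 10, activation="relu"),
    layers.MaxPooling1D(2),
    layers.LSTM(128, return_sequences=True),
    layers.LSTM(64),
    layers.Dense(64, activation="relu"),
    layers.Dense(15, activation="softmax"),
])
model.compile(optimizer="adam", loss="sparse_categorical_crossentropy", metrics=["accuracy"])
model.fit(X_tr[..., None], y_tr, validation_data=(X_te[..., None], y_te),
          epochs=10, batch_size=256)
```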
We conducted controlled experiments in IoT-edge networks to assess these IDS models, simulating benign traffic alongside a range of cyber threats (DDoS, DoS, brute force attacks, and port scans) using Kali Linux [53]; a minimal traffic-generation sketch is shown after Table 3. These experiments

Table 3 Performance Comparison of ML-based IDS
| | DT | KNN | RF | LSTM | LSTM+CNN | CNN |
|---|---|---|---|---|---|---|
| Accuracy | 0.9985 | 0.9967 | 0.9981 | 0.9386 | 0.9575 | 0.9474 |
| Precision | 0.9985 | 0.9966 | 0.9980 | 0.9771 | 0.9877 | 0.9792 |
| Recall | 0.9985 | 0.9967 | 0.9981 | 0.9524 | 0.9645 | 0.9611 |
| F1-Score | 0.9985 | 0.9966 | 0.9980 | 0.9646 | 0.9760 | 0.9701 |
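As a rough illustration of the kind of traffic injected during these runs, the sketch below uses Scapy to emulate SYN-flood and port-scan behaviour. The actual experiments used Kali Linux tooling [53], so this script, its placeholder target address, and its packet counts are assumptions rather than the exact attack generators, and it should only ever be pointed at an isolated testbed.

```python
from scapy.all import IP, TCP, RandIP, RandShort, send

TARGET = "192.0.2.10"  # placeholder address of the edge gateway under test (assumption)

def syn_flood(dst, dport=80, count=10_000):
    """Volumetric TCP SYN traffic, loosely emulating the DoS/DDoS classes."""
    pkt = IP(src=RandIP(), dst=dst) / TCP(sport=RandShort(), dport=dport, flags="S")
    send(pkt, count=count, verbose=False)  # requires root privileges

def port_scan(dst, ports=range(1, 1024)):
    """SYN probes across the low port range, emulating the port-scan class."""
    for p in ports:
        send(IP(dst=dst) / TCP(dport=p, flags="S"), verbose=False)

if __name__ == "__main__":
    syn_flood(TARGET)
    port_scan(TARGET)
```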
+ +![](images/0ce2d9d2a60fcb651c812ff09c8140b466b45113ba825a0cbc96ce8abb6d1368.jpg) +Figure 1: IoT-edge testbed topology, illustrating non-SDN and SDN-enabled setups. + +enabled us to analyze the IDS models' impact on critical performance metrics, specifically CPU usage, CPU load, and energy consumption. + +To address RQ2, we extended our testbed by integrating the edge gateway with the Ryu controller, establishing an SDN-based environment. Ryu, an open-source Python-based SDN controller [54], provides centralized traffic management, enhancing resource allocation and security analysis. We further utilized Mininet [55] to simulate a realistic SDN infrastructure consisting of eighteen hosts, six switches, and a Ryu controller, mirroring real-world network conditions. + +# 4.5. Metrics + +We evaluated CPU usage, CPU load, and energy consumption in our test beds in the context of ML-based IDS during cyber threat scenarios. We employed the ANOVA[56] to ensure an objective assessment of the performance of various ML-based IDS. + +# 4.5.1. CPU Load CPU Usage + +IDS, especially at the edge and SDN environments. CPU usage measures the percentage of the CPU's current capacity, reflecting how much processing power is dedicated to task execution. High CPU usage in an IDS can signal extensive computational demands, potentially impacting the performance of other tasks and system responsiveness, a concern in resource-limited IoT settings. Efficient IDS, especially those utilizing ML techniques, must manage CPU + +usage carefully to balance detection accuracy with minimal resource use. Excessive CPU usage can slow IDS's real-time network traffic processing, leading to delays or missed attack detection. On the other hand, CPU load indicates the number of processes waiting to be executed, providing an understanding of the CPU's workload. An increase in CPU load might suggest heavy network traffic or numerous attack attempts, highlighting the risk of system overload. Monitoring CPU load allows for early identification of potential bottlenecks, ensuring that IDS operations do not adversely impact system performance. In SDN-enabled IoT edge systems, adept CPU load management is vital to distribute tasks between IDS and other network efficient functions, ensuring optimal resource allocation and system performance. Both CPU usage and load are pivotal metrics for assessing IDS efficacy in environments where resources are constrained, e.g., at the edge gateway[57][58][59]. + +# 4.5.2. CPU Performance Metrics + +To assess the computational impact of ML-based IDS, we analyze both CPU load and CPU usage, as these metrics provide complementary insights into system performance. CPU usage is typically expressed as a percentage, indicating the proportion of processing power utilized at a given moment. In contrast, CPU load is presented as a numerical value, representing the average number of active processes waiting for CPU execution over a specific time interval. Moreover, + +while CPU load can be converted into a percentage, it provides a more detailed view of system stress, especially in multi-core environments. In a multi-core processor, a load value of 1.0 on a single-core system indicates full utilization. In contrast, on a quad-core system, a load of 1.0 suggests that only $25\%$ of the total available processing capacity is used. 
This distinction is crucial when interpreting our results, as high CPU load does not always imply that the system is at risk of overutilization—it depends on the number of available processing cores and the workload distribution. + +# 4.5.3. Energy Consumption + +Energy consumption, often measured in watt-hours or joules, quantifies the amount of energy a device or system expended during its operation. In IoT hardware, where many devices are battery-powered or operate in energy-constrained environments, efficient energy consumption is desirable and necessary. Devices (e.g., sensors, actuators) and even more complex IoT nodes must be designed to perform their tasks while consuming minimal energy, ensuring longevity, and reducing the need for frequent battery replacements or recharges. Moreover, IoT devices integrated with SDN bring a new dimension to the energy conversation; SDN centralizes network control, dynamically optimizing network resources based on real-time demands. Although this centralization offers enhanced flexibility and scalability, it also means that the network's core components must be energy efficient. In IoT systems, where potentially thousands or even millions of devices communicate and exchange data, even minor inefficiencies in energy consumption can accumulate, leading to significant energy drains. Integrating ML-based IDS into the edge gateway emphasizes the need to consider energy metrics critically. ML-based IDS are inherently data-intensive, requiring substantial computational resources to process large datasets for detecting and mitigating security threats. Although these systems offer invaluable security enhancements, their operation can be energy-intensive. Therefore, measuring and optimizing the energy consumption of ML-based IDS is crucial to ensure they deliver effective security measures without unduly burdening the system's energy resources. This balance is essential for maintaining the sustainability and efficiency of the edge gateway, where energy efficiency is often a key concern. + +We employed PowerTop [60], a robust tool, to precisely gauge and examine the energy consumption in two separate testbed configurations: the edge gateway integrated with SDN and without SDN. PowerTop's sophisticated monitoring capabilities allowed us to gain insights into these testbeds' energy consumption patterns and processor activity. + +# 4.5.4. Designed cyber threats + +For our research, we focused on analyzing DDoS, DoS, brute force attacks, and the port scan. We chose these specific types of attacks since they were already categorized in the employed dataset. These cyber threats are prevalent and + +pose substantial risks in the field of cybersecurity. Below, a concise summary of each is presented: + +- A Denial-of-Service (DoS): At the edge, DoS attacks are critical cybersecurity threats that disrupt device and service operations by flooding systems with excessive requests and consuming vital resources (e.g., bandwidth, processing power, and memory). This overload prevents the system from serving legitimate users, blocking access to essential operations. The distributed, resource-constrained nature of the edge makes them especially susceptible to DoS attacks. The vulnerability of these devices, coupled with their interconnectedness, means that an attack on a single device can significantly compromise the entire network's functionality and security [61]. 
+- A distributed denial-of-service (DDoS): A DDoS attack is a coordinated effort where multiple attackers from different locations flood a specific target, such as a server or network at the edge, with excessive traffic. The goal is to deplete the target's resources, causing severe service disruptions or a complete shutdown. Unlike traditional DoS attacks, which come from a single source, DDoS attacks are distributed across numerous sources, making them harder to defend against. This distributed nature makes DDoS attacks especially dangerous at the edge, where the interconnected and resource-constrained devices can exacerbate the attack's impact, potentially crippling the entire network [62]. +- Brute Force: A brute force attack involves an attacker systematically attempting to gain unauthorized access to a system by trying every possible combination, such as trying every key until one works. With its many interconnected devices and varying security levels, the edge is especially vulnerable to such attacks. Attackers exploit these weaknesses by repeatedly guessing passwords, encryption keys, or access codes, which seriously threatens the integrity and confidentiality of data at the edge gateway[63]. +- Port Scan:A port scan aims to identify a target system's open ports. By identifying open ports and the services running on them at the edge, attackers can uncover and exploit vulnerabilities, posing a serious threat to the security and integrity of the edge gateway[64]. + +# 4.5.5. Analysis method for energy consumption, CPU usage, CPU load + +We used ANOVA to assess our observed results. ANOVA is an indispensable statistical tool for testing the null hypothesis that posits the equivalence of group means. Our study specifically employed one-way ANOVA to examine the impact of a singular independent variable on the evaluated systems. This method relies on several crucial assumptions, including the necessity for the data to exhibit + +a normal distribution, the variances between groups being equal (homogeneity of variance), and all observations being independent. + +In addition, we conducted 15 separate tests on ML-based IDS to measure CPU load, CPU usage, and energy consumption under various cyber threats. This rigorous approach allowed us to leverage the F statistic, which quantifies the variance ratio between the means of different groups to the variance in the groups. A significant F-statistic, together with a p-value of $\leq 0.05$ , denotes statistically significant differences between group means, underscoring the efficacy of our testing methodology. By implementing this robust statistical framework, we have thoroughly evaluated the performance of various ML-based IDS models in response to different cyber threats. This analysis has allowed us to identify specific models that demonstrate resilience or efficiency against multiple attacks and require increased computational resources or energy consumption. While CPU load is a key performance metric for IDS evaluation, it is also crucial to consider its impact on IoT device availability and reliability. Excessive CPU consumption by an IDS can degrade the device's primary functions, leading to slow response times or system failures. This is especially critical in real-time applications such as healthcare, industrial automation, and smart home security, where device downtime can have serious consequences. An IDS must enhance security without inadvertently causing an attack such as a DDoS condition due to resource exhaustion. 
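The one-way ANOVA described above can be reproduced with SciPy. The sketch below is a minimal example in which the per-model measurement arrays are synthetic stand-ins (the study uses the fifteen measured values per model and scenario), so the numbers it prints are not the ones reported in the tables that follow.

```python
import numpy as np
from scipy.stats import f_oneway

rng = np.random.default_rng(0)

# Synthetic stand-ins: 15 CPU-load samples per IDS model under one attack scenario.
# In the study these arrays hold the measured values from the fifteen test runs.
groups = {name: rng.normal(loc=mu, scale=2.0, size=15)
          for name, mu in [("DT", 30), ("KNN", 35), ("RF", 28),
                           ("CNN", 18), ("LSTM", 15), ("LSTM+CNN", 17), ("EIDM", 16)]}

# One-way ANOVA across the seven models: the F statistic compares between-group
# variance to within-group variance; p <= 0.05 indicates a significant difference
# in mean CPU load between at least two models.
f_stat, p_value = f_oneway(*groups.values())
print(f"F = {f_stat:.2f}, p = {p_value:.4g}")
```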
In addition, through these fifteen iterations of testing, ANOVA has enabled us to validate significant differences in IDS performance metrics (e.g., detection accuracy, false positive rates), CPU load, CPU usage, and energy consumption across diverse scenarios. This methodological approach provides a detailed examination of how different IDS models respond to varied threats, establishing a solid statistical foundation for assessing the efficacy of each model in a controlled environment. By distinguishing between performance differences attributable to the models' inherent capabilities and those due to random variation, our use of ANOVA has proven to be critical. It aids in identifying the most resource-efficient and reliable IDS, thereby guiding the selection process for optimal cybersecurity defenses and enhancing our management and understanding of IDS performance under cyber threat conditions [65] [66]. + +# 4.6. TestSuite + +To initiate the research work presented in this paper and to facilitate the environment for further research and testing, we introduce a versatile test suite designed to experiment with and evaluate ML-based IDS in SDN environments. Unlike conventional experimental testbeds, our test suite is an extensible framework equipped with predefined APIs and a selection of pre-integrated algorithms, facilitating the seamless integration and testing of novel IDS models. Another good contribution to our test suite is that users can execute their experiments on it without Raspberry Pi or any other hardware support. As discussed in the previous + +paragraph, the test suite is developed following the plug-in architecture feature. This ensures that the user can easily integrate their algorithm into the test suite and test the accuracy, energy consumption, and CPU usage with or without security threats. Users can create their own IoT-SDN network and complexity in the network and generate any number of security breaching attacks. This approach not only simplifies the validation process of IDS models in a realistic network scenario but also encourages the exploration of innovative IDS methodologies by providing a solid foundation of tools and benchmarks. We have made the test suite available with the same configuration discussed in Section 4.4. We integrated the same tools for creating an IoT-SDN network, generating security attacks, and measuring IDS accuracy, energy consumption, CPU usage, etc. Through its design, the test suite aims to advance the development and thorough evaluation of cutting-edge IDS solutions, significantly enhancing network security in the era of SDN. + +# 5. Experimental Results and Analysis + +This section discusses our experimental results and findings. After presenting our results, we conducted an in-depth statistical analysis using ANOVA. This analysis aims to illuminate the implications and insights that emerge from the experimental results, providing an understanding of the efficacy and nuances of each IDS under study. + +# 5.1. Experimental finding for RQ1 CPU Load: + +We tested ML-based IDSs under various cyberattack scenarios to assess their impact and strain on our testbed. The types of cyberattacks we considered include DDoS, DoS, brute force attacks, and the port scan. Moreover, we conducted the ANOVA focusing on CPU load variations in our testbed. Figure 2 illustrates a comparative analysis of the average CPU load among different ML-based IDS models in the presence of various types of cyberattacks. 
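The averages discussed below were obtained by sampling the gateway while each IDS ran. As a minimal sketch of such a sampling loop, the snippet below uses psutil, which is an assumption on our part; the released test suite may collect these metrics differently, and energy was measured separately with PowerTop [60].

```python
import csv
import time
import psutil

def sample_cpu_metrics(duration_s=60, interval_s=1.0, out_path="cpu_metrics.csv"):
    """Record CPU usage (%) and the 1-minute load average while an IDS is running."""
    with open(out_path, "w", newline="") as f:
        writer = csv.writer(f)
        writer.writerow(["timestamp", "cpu_percent", "load_1min"])
        end = time.time() + duration_s
        while time.time() < end:
            cpu = psutil.cpu_percent(interval=interval_s)  # blocks for interval_s
            load1, _, _ = psutil.getloadavg()              # 1/5/15-minute load averages
            writer.writerow([time.time(), cpu, load1])

if __name__ == "__main__":
    sample_cpu_metrics()
```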
The DL-based IDS (CNN, LSTM, combined model of LSTM and CNN, and EIDM) consistently maintain lower CPU loads across all attack types, demonstrating their efficiency in resource utilization during inference. In contrast, traditional ML-based IDS such as KNN, DT, and RF exhibit significantly higher CPU loads, especially under brute force and DDoS attacks, with KNN and DT being the most resource-intensive. This is because DL models, such as CNN and LSTM, efficiently handle computations in parallel and are optimized for inference. In contrast, traditional models (e.g., KNN and DT) require more repeated, resource-heavy calculations, such as distance computations in KNN or recursive splitting in DTs, especially under large-scale attacks. + +# Statistical Findings: + +We conducted an ANOVA, and the results presented in Table 4 illuminate significant differences in CPU load among diverse ML-based IDS under DDoS, underscored by F-statistic of 60.40 and a p-value $< 0.05$ . This F-statistic delineates + +![](images/2bdb3a6d783df896b48d0f412798353d9e3d3b880f7d4365d283f017702a4c35.jpg) +Evaluating Machine Learning-driven Intrusion Detection System +Figure 2: The Average CPU load of ML-based IDS under cyber threats. + +Table 4 ANOVA results: CPU Load for ML-based IDS under DDoS. + +
| Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value |
|---|---|---|---|---|---|
| Between groups | 6 | 21609.87 | 3601.64 | 60.40 | < 0.05 |
| Within groups | 91 | 5426.49 | 59.63 | | |
| Total | 97 | 27036.36 | 278.73 | | |
+ +the contrast in CPU load variance across ML-based IDSs against the variance in, highlighting a significant influence of IDS selection on CPU load. The remarkably low p-value corroborates this finding, conclusively demonstrating the substantial differences in CPU load among the IDSs. Furthermore, we observed similar p-values $(< 0.05)$ across other attacks, including brute force, DoS, and the port scan, so we do not report them. This reinforces the presence of marked differences in CPU load among diverse ML-based IDS under different cyber threats. + +# Finding + +DL-based IDS, such as CNN, LSTM, and hybrids, perform more efficiently in managing computational demands across diverse types of cyber threats than traditional ML-based IDS, such as KNN, DT, and RF, as they exhibit higher CPU loads at the edge. This pattern suggests that DL-based IDS' intrinsic efficiency is not attack-specific but rooted in their architecture, making them especially suited for real-time applications at edge gateway. These results are expected, as traditional ML-based IDS (e.g., KNN, DT, RF) perform computationally expensive operations during inference, unlike DL-based IDS, which optimizes processing through parallelization and learned feature extraction. + +# CPU Usage: + +Figure 3 compares the average CPU usage of various ML-based IDS models under different cyberattacks. The KNN model consistently exhibits the highest CPU usage across all attack types, indicating its high computational demand, which limits its use in resource-constrained environments. The RF and DT models are also CPU-bound, though they are less intensive than KNN. In contrast, the LSTM model demonstrates the lowest CPU usage, making it the most efficient option for scenarios where minimizing resource consumption is critical. The hybrid of the LSTM and CNN model, along with the CNN and EIDM models, offer a balance between inference accuracy and computational efficiency, making them viable choices for environments with moderate resource availability. + +# Statistical Findings: + +Table 5 presents our ANOVA results. Our results reveal significant differences in CPU load among diverse ML-based IDS under DDoS, as evidenced by a compelling F-statistic of 60.39 and a p-value $< 0.05$ . This F-statistic highlights the variance in CPU load across IDS groups compared to the variance in, underscoring a significant impact of IDS selection on CPU load. The exceedingly small p-value further supports this conclusion. Moreover, we observed similar p-values (below 0.05) across various cyber threats, such as brute force, DoS, and the port scan, so we do not report those results. + +![](images/a433945cb1829f61259f6ba9148719bf834e1a76b8a5e34e20d6d88a8c9f6a32.jpg) +Evaluating Machine Learning-driven Intrusion Detection System +Figure 3: The Average CPU usage of ML-based IDS under cyber threats. + +Table 5 ANOVA results: CPU Usage for ML-based IDS under DDoS. + +
| Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value |
|---|---|---|---|---|---|
| Between groups | 6 | 21609.86 | 3601.64 | 60.39 | < 0.05 |
| Within groups | 91 | 5426.49 | 59.62 | | |
| Total | 97 | 27036.36 | 278.73 | | |
+ +# Finding + +Our analysis reveals that traditional ML-based IDS such as KNN, DT, and RF exhibit increased CPU usage under various cyber threats, thus posing challenges for the edge. Also, LSTM and other DL-based IDS exhibit lower CPU demands. This consistent efficiency across various attacks highlights the benefit of adopting DL-based IDS at the edge gateway. The increased CPU usage of KNN, DT, and RF reflects their reliance on instance-based and tree-splitting operations, which require repeated evaluations. In contrast, DL models efficiently process data in structured layers, reducing computational strain. + +# Energy consumption: + +Figure 4 shows that the LSTM and DT models are the most energy-efficient across different types of cyberattacks, consistently exhibiting the lowest energy consumption. The CNN model also performs efficiently, with slightly higher energy usage. The LSTM, CNN model hybrid, and EIDM have moderate energy consumption, balancing complexity and efficiency. In contrast, the KNN model has the highest energy consumption across all scenarios, making it less suitable for energy-constrained environments. The RF model falls in between, with moderate energy demands. + +# Statistical Findings: + +We conducted the ANOVA, and the results presented in Table 6 reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by F-statistic of 57.44 and a p-value of $< 0.05$ . This F-statistic delineates the contrast in energy consumption variance across the group of IDSs against the variance in, highlighting a significant influence of IDS selection on energy consumption. The extremely low p-value further supports this conclusion, conclusively demonstrating the substantial differences in energy consumption among the IDSs. In addition, we observed similar p-values ( $< 0.05$ ) for other cyber threats, such as brute force, DoS, and the port scan, so we do not report the results. This observation demonstrates significant differences in energy consumed among various ML-based IDS when faced with differing cyber threats. + +![](images/8790c0ec8c8e4948e20afe7e459c53de3ac9970a2ea4575d17aa9e6a0701d68f.jpg) +Figure 4: The Average Energy consumption of ML-based IDS under cyber threats. + +Table 6 ANOVA results: energy consumption for ML-based IDS under DDoS. + +
| Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value |
|---|---|---|---|---|---|
| Between groups | 6 | 47732.07 | 7955.34 | 57.44 | < 0.05 |
| Within groups | 98 | 13571.72 | 138.48 | | |
| Total | 104 | 61303.80 | 589.45 | | |
+ +# Finding + +Our analysis concludes a marked discrepancy in energy consumption, with traditional ML-based IDS such as KNN, RF, and DT exhibiting significantly higher energy consumption under cyber threats such as DDoS and brute force, a drawback for energy-constrained at the edge. In contrast, DL-based IDS models, LSTM, CNN, EIDM, and their hybrids excel in energy efficiency, making them the preferable choice for the edge. Traditional ML models' higher energy consumption results from their iterative computations and lack of optimized inference paths, making them less viable for real-time IoT applications where power efficiency is crucial. + +# 5.2. Experimental finding for RQ2 + +This section presents our experimental results for IoT-edge devices with SDN integration during real-time cyber threats. + +# CPU Load: + +In Figure 5, we illustrate the CPU load of various ML-based IDS models under different cyberattacks in an SDN-enabled at the edge gateway. The analysis shows that KNN and DT models have the highest CPU load, especially during DDoS and DoS, indicating significant resource demands at + +the edge. Conversely, the LSTM model demonstrates the lowest CPU load, highlighting its efficiency in resource management. The CNN model also performs efficiently but not as well as LSTM. The LSTM and CNN model hybrid, similar to EIDM, offers balanced performance, making them suitable for scenarios where moderate CPU efficiency is required at the edge. + +# Statistical Findings: + +We conducted an ANOVA for the case of the DDoS attack, and the results are presented in Table 7. The results reveal significant differences in CPU load among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 142.57 and a p-value of $< 0.05$ . This F-statistic highlights the variance in CPU load across IDSs compared to the variance in them, indicating a significant impact of IDS selection on CPU load. In addition, consistent p-values $(< 0.05)$ were observed across other cyber threats, including brute force, DoS, and the port scan, and we do not report the result. This reinforces the presence of marked differences in CPU load among diverse ML-based IDS when subjected to different cyber threats. + +![](images/a9fbe4523e7a90197894b5f92e5e5c2fa314185d12790a490d4434c61aff811f.jpg) +Evaluating Machine Learning-driven Intrusion Detection System +Figure 5: The Average CPU load of ML-based IDS under cyber threats. + +Table 7 ANOVA results: CPU load for ML-based IDS in SDN under DDoS. + +
| Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value |
|---|---|---|---|---|---|
| Between groups | 6 | 1184.21 | 197.36 | 142.57 | < 0.05 |
| Within groups | 91 | 125.97 | 1.38 | | |
| Total | 97 | 1310.18 | 13.50 | | |
+ +# Finding + +The findings demonstrate that traditional ML-based IDS, e.g., DT, exhibit elevated loads under DDoS and DoS. In contrast, DL-based IDSs, including EIDM, LSTM, CNN, and their hybrids, demonstrate superior energy efficiency, making them suitable for SDN-enabled at the edge gateway. The integration of SDN helps balance network resource allocation. Yet, traditional ML-based IDS still exhibit higher CPU load due to their design, reinforcing the efficiency advantage of DL-based models in dynamic network environments. + +# CPU Usage: + +Figure 6 shows that CPU usage across various ML-based IDS models in an SDN-enabled edge gateway is fairly consistent across different attack scenarios. Only minor variations are observed, as CNN, LSTM, and hybrid versions demonstrate relatively lower CPU usage, indicating efficient resource management. The DT, KNN, and RF models also show consistent CPU usage across attacks. The EIDM model balances efficiency and performance well. + +# Statistical Findings: + +We conducted an ANOVA for the results we got for ML-based IDS in SDN under the DDoS attack. The results presented in Table 8 reveal significant differences in CPU + +usage among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 5.94 and a p-value of $< 0.05$ . This F-statistic highlights the variance in CPU usage across the group of IDSs compared to the variance in, indicating a significant impact of IDS selection on CPU usage. In addition, we observed a consistently low p-value $(< 0.05)$ for other examined cyber threats (not reported in the paper), including brute force, DoS, and port scan, reinforcing the presence of marked differences in CPU usage among diverse ML-based IDS when subjected to different cyber threats. + +![](images/3438938ed0f1adb9e28527aebc27ca8fcbe55857e4020f1d3c8eb9e1b3a85be0.jpg) +Evaluating Machine Learning-driven Intrusion Detection System +Figure 6: The Average CPU usage of ML-based IDS under cyber threats. + +Table 8 ANOVA results: CPU usage for ML-based IDS in SDN under DDoS. + +
| Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value |
|---|---|---|---|---|---|
| Between groups | 6 | 27.97 | 4.66 | 5.94 | < 0.05 |
| Within groups | 91 | 71.32 | 0.78 | | |
| Total | 97 | 99.30 | 1.02 | | |
+ +# Finding + +In the context of SDN-enhanced IoT, deploying DL-based IDS with advanced models such as CNN, LSTM, EIDM, and their hybrids demonstrates efficient energy consumption. These models achieve reduced CPU usage against brute force and port scan, benefiting from the centralized resource optimization afforded by SDN. Nonetheless, the complexity of DDoS and DoS presents a significant challenge, necessitating increased computational resources. Although SDN optimizes network operations, IDS models such as KNN and RF remain resource-intensive due to their frequent computational overhead. At the same time, DL-based IDS maintains efficiency through batch processing and learned representations. + +# Energy consumption: + +Figure 7 depicts the average energy consumption of ML-based IDS models under different attacks in an SDN environment. The results indicate that traditional ML models consume more energy, especially during port scans, e.g., DT, KNN, and RF. In contrast, the EIDM model consistently shows lower energy consumption across all attack types, highlighting its efficiency. The LSTM and CNN models display moderate energy usage, including their hybrid + +version. Compared to non-SDN environments, the increased energy consumption in the SDN setup is attributed to the SDN controller's active role in traffic management and threat response, which demands more energy resources. + +# Statistical Findings: + +We applied ANOVA on energy consumption data across ML-based IDSs in SDN under DDoS. The results, presented in Table 9, reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by an impressive F-statistic of 18.27 and a p-value of $< 0.05$ . This F-statistic highlights the variance in energy consumption across a group of IDSs compared to the variance in, indicating a significant impact of IDS selection on energy consumption. Moreover, a consistently low p-value ( $< 0.05$ ) was observed across other cyber threats, including brute force, DoS, and port scan, so we do not report the results here. This highlights marked differences in CPU usage among diverse ML-based IDS when subjected to examined cyber threats. + +![](images/76798f9d0ff7469f92dc1968687a668bbe01b8942db9c98e301c11f254bb5a84.jpg) +Evaluating Machine Learning-driven Intrusion Detection System +Figure 7: The Average Energy consumption of ML-based IDS under cyber threats. + +Table 9 ANOVA results: Energy consumption for ML-based IDS in SDN under DDoS. + +
| Source | Degrees of Freedom | Sum of Squares | Mean Square | F Statistic | P-value |
|---|---|---|---|---|---|
| Between groups | 6 | 1263.26 | 210.54 | 18.27 | < 0.05 |
| Within groups | 91 | 1048.21 | 11.51 | | |
| Total | 97 | 2311.48 | 23.82 | | |
+ +# Finding + +The findings accentuate the distinct energy efficiency profiles of ML-based IDSs when exposed to various cyber threat scenarios. During brute force and the port scan, traditional ML-based IDS such as DT, KNN, and RF are observed to have higher energy consumption. This indicates that these models are not energy-efficient under the examined conditions due to their complex computational frameworks. On the other hand, DL-based IDS and the EIDM show markedly superior energy efficiency. The reduced energy footprint of DL-based IDS is especially advantageous in the context of the SDN-enabled at the edge, where low energy consumption is crucial due to device constraints and the need for long-term, autonomous operation. The reduction in energy consumption observed in DL-based IDS when integrated with SDN highlights the benefits of centralized network control and optimized workload distribution, making them a more sustainable choice for IoT security. + +# 5.3. Analyzing the Impact of SDN on CPU Usage, Load, and Energy Efficiency in ML-Based IDS + +Figure 8 demonstrates that integrating SDN with ML-based IDS in the edge gateway significantly improves resource efficiency, reducing energy consumption, CPU usage, and CPU load. The most substantial improvement is in CPU usage, where DL-based IDS, e.g., LSTM and CNN, outperform traditional ML models by efficiently handling complex computations through parallel processing. Additionally, SDN integration reduces CPU load by balancing workloads, essential for real-time threat detection in edge gateway. The observed reduction in energy consumption further highlights the approach's suitability for battery-powered edge gateway, confirming its scalability and practicality for real-world applications. + +# 6. ML-Based IDS vs. Signature-Based IDS (Snort) + +This section compares our ML-based IDS models and the signature-based Snort IDS to evaluate the performance improvements achieved by leveraging ML-based IDS over traditional detection systems. This comparison is essential to highlight the advantages of ML-based approaches regarding resource efficiency, scalability, and adaptability, especially in edge gateway. + +![](images/3b4e80fa10eb1e2f9e4d8be48c124d93e93944e9df202e0ad45add7539d26c8e.jpg) +Evaluating Machine Learning-driven Intrusion Detection System +Figure 8: Reduction in energy consumption, CPU usage, and CPU load for ML-based IDS models with SDN integration in edge gateway. + +The results presented in Table 10 provide a comparative analysis of our ML-based IDS models against the signature-based Snort IDS discussed in other research. + +Regarding CPU usage, Snort IDS shows high utilization under heavy traffic due to its reliance on predefined rules and signature matching. In contrast, the ML-based IDS models demonstrate better CPU efficiency. While traditional ML models, e.g., DT and KNN, have higher CPU usage because of iterative computations, DL-based IDS, e.g., LSTM, CNN, and a hybrid of LSTM and CNN, EIDM exhibits lower CPU usage. This is primarily due to DL-based IDS's ability to process data in batches and leverage parallel processing for real-time threat detection. For energy consumption, Table 10 shows that Snort IDS consumes more energy, especially in IoT networks requiring multiple containers. However, our ML-based IDS models, especially DL architectures, e.g., LSTM and EIDM, demonstrate superior energy efficiency. 
These models optimize resource usage and process data efficiently, making them suitable for resource-constrained edge gateway and highlighting their scalability advantages. Finally, in terms of CPU load, Table 10 indicates that earlier versions of Snort IDS suffer from high CPU load on a single core because of their single-threaded architecture. Although newer versions introduce multi-threading, they still encounter processing bottlenecks under heavy traffic. Conversely, the ML-based IDS models distribute the CPU load more effectively across multiple cores. DL-based IDS, especially LSTM and hybrid architectures, achieve the lowest CPU load levels due to their parallel execution capabilities and efficient handling of sequential data. + +# 7. Discussion + +Our investigations explored the performance metrics of ML-based IDS with various models, especially in IoT-edge devices with and without SDN integration. Our study was primarily evaluating the impact of these models on CPU load, CPU usage, and energy consumption amidst diverse + +cyberattack scenarios. The empirical findings revealed significant disparities in resource utilization across different ML-based IDS, shedding light on crucial aspects of their deployment in IoT devices integrated with SDN. The KNN, DT, and RF significantly exhibited higher CPU load, CPU usage, and energy consumption, especially under specific types of cyberattacks. While these models are adept at identifying threats, their resource-intensive nature could pose challenges in the IoT context, where computational resources are often limited. This could lead to diminished performance or instability in environments with constrained resources. Specifically, KNN's higher variance in CPU load and energy consumption, as observed in Tables 4 and 5, stems from its lazy learning approach. Unlike other models, KNN does not build a generalized model during training but instead stores the entire dataset and computes distances at query time. This results in increased processing demands, leading to fluctuations in resource utilization. Such behavior makes KNN less suitable for real-time IDS applications in resource-constrained IoT networks[72] [73]. While CPU load significantly impacts energy consumption, it is not the sole factor. Memory operations, network activity, peripheral devices, and thermal management also contribute to power usage in IoT devices. High data transmission rates and active sensors can increase energy demands, while sustained CPU load may trigger additional energy consumption for cooling mechanisms. Although a strong correlation between CPU load and energy consumption is expected, these factors introduce variations across IDS models. Optimizing IDS efficiency can help balance security and resource constraints in IoT networks. Conversely, the CNN and LSTM models demonstrated greater efficiency in resource utilization. While their architectures are sophisticated and adept at processing complex data structures, they appear to optimize the computational load during inference when employed in IDS. This makes them more suitable for scenarios where resource conservation is critical. However, the complexity of these models introduces its own set of challenges, especially + +Table 17 Comparative Resource Utilization of ML-Based IDS and Snort IDS Based + +
| Metric | Snort IDS | ML-Based IDS (Our Findings) |
|---|---|---|
| CPU Usage | High traffic conditions: CPU usage can reach its maximum during initialization with many active rules [67].<br>Multi-core systems: Snort 3.0 utilizes a significant portion of CPU resources on a multi-core processor [68] [69]. | Traditional ML models (DT, KNN, RF): tend to exhibit higher CPU usage during real-time cyber threats, especially those requiring intensive computations.<br>DL-based models (CNN, LSTM, hybrid of LSTM and CNN, and EIDM): show lower CPU usage than traditional ML models, with LSTM models demonstrating the most efficient utilization due to sequential data processing and parallelization. |
| Energy Consumption | IoT deployment: deploying Snort on IoT gateways results in considerable energy consumption [70]. | Traditional ML-based IDS: generally consume more energy during inference cycles due to repetitive computations.<br>DL-based models: exhibit better energy efficiency, especially models that combine convolutional and sequential layers, benefiting from optimized processing structures. |
| CPU Load | Single-core utilization: older Snort versions (pre-3.0) lead to high load on a single core under heavy traffic [71].<br>Multi-core systems: updated versions distribute the load but still face processing bottlenecks under extensive traffic [71]. | Traditional ML-based IDS: often show higher CPU load during complex attack scenarios.<br>DL-based models: maintain a lower CPU load, benefiting from parallel processing capabilities, with hybrid models showing the most balanced load distribution. |
+ +in terms of training and ongoing maintenance in the dynamic landscape of IoT devices integrated with SDN. + +The balance between detection efficiency and resource consumption is especially critical at edge gateway, where devices often have limited processing power and energy reserves. This balance is closely tied to several United Nations Sustainable Development Goals (SDGs), especially SDG 9 (Industry, Innovation, and Infrastructure), SDG 11 (Sustainable Cities and Communities), and SDG 13 (Climate Action). Optimizing IDS deployment in smart cities strengthens cybersecurity infrastructure, directly supporting SDG 9 while fostering resilient, sustainable urban environments in line with SDG 11. Furthermore, by prioritizing energy-efficient IDS solutions, this research contributes to SDG 13, promoting responsible resource consumption and mitigating the environmental impact of growing IoT networks [74]. + +To aid IoT developers in selecting appropriate IDS solutions, we provide detailed guidelines in Table 11 and Table 12, outlining the performance trade-offs of seven different ML-based IDS models for IoT devices examined in this paper, both with and without SDN integration. These insights enable developers to make informed decisions, ensuring the optimal balance between security and resource efficiency during application development. We use graphical indicators (smiley faces) instead of numerical values to provide an intuitive, high-level comparison of IDS performance. This visual approach simplifies decision-making for IoT developers, aligning with similar methodologies used in prior work [75]. Moreover, all corresponding numerical values + +related to CPU usage, CPU load, and energy consumption are presented in the Figures and Tables in Section 5. + +On the other hand, to the best of our knowledge, only Tekin et al. [12] have explored a similar direction in evaluating the performance of ML-based IDS in IoT systems. However, our study takes a fundamentally different approach, especially in how computational resources are classified and utilized, which plays a critical role in the effectiveness and scalability of IoT systems. While Tekin et al. focus on energy consumption and inference times using Raspberry Pi as an IoT device, our study emphasizes the advantages of processing data at the edge, especially regarding energy efficiency, CPU load, and usage. We show how models such as DT and RF benefit from edge processing, reducing latency and improving responsiveness, especially when combined with SDN, which optimizes network traffic and resource allocation. Our findings underscore the importance of balancing computational tasks across the network using SDN to maintain performance, unlike Tekin et al. [12], who do not explore the impact of edge computing or SDN integration. + +# 8. Threat and validity + +Empirical research inevitably encounters issues related to the validity of findings. In light of this, the present section seeks to identify and discuss possible threats to our research's validity, per the recommendations of Wohlin et al. [76]. + +Table 11 Guideline for selecting seven ML-based IDS in edge gateway. + +
| Metric | DT | KNN | RF | CNN | LSTM | LSTM+CNN | EIDM |
|---|---|---|---|---|---|---|---|
| CPU load | | | | | | | |
| CPU usage | | | | | | | |
| Energy consumption | | | | | | | |
+ +Table 12 Guideline for selecting seven ML-based IDS in SDN-edge gateway. + +
| Metric | DT | KNN | RF | CNN | LSTM | LSTM+CNN | EIDM |
|---|---|---|---|---|---|---|---|
| CPU load | | | | | | | |
| CPU usage | | | | | | | |
| Energy consumption | | | | | | | |
+ +The energy consumption and CPU usage in all ML-based IDS lowered during the brute force attack and port scan. + +# 8.1. Internal Threats + +During our empirical study on ML-based IDS in the context of IoT devices with IoT devices integrated with SDN, we recognized the existence of internal obstacles that impact the credibility of our findings. The precision of our performance measures is of utmost importance, namely the measurement of CPU load, CPU usage, and energy consumption in these intricate network settings. The complex characteristics of IoT devices and the adaptable structure of SDN provide significant difficulties in guaranteeing accurate and dependable performance evaluations. To address these concerns, we performed fifteen experiments on our testbeds. To improve the trustworthiness of our results in the context of SDN and IoT, we utilized average values to reduce the impact of network or hardware differences and ambient factors. In addition, the cyber threat simulations were conducted using highly practiced cyber security testing mechanisms in academic research and industries in IoT-edge devices integrated with SDN. This work aims to tackle internal risks associated with the setup and precision of ML-based IDS, improving their usefulness and significance in these fast-advancing technical fields. + +# 8.2. External Threats: + +The landscape of network security, especially in IoT-edge devices and IoT-edge devices integrated with SDN realms, is increasingly challenged by external threats. These range from sophisticated cyberattacks such as DoS, DDoS, and brute force attacks to more subtle, yet equally harmful, reconnaissance methods such as a port scan. These threats highlight the urgent need for robust and adaptable IDS solutions. Integrating ML into IDS presents promising advancements in threat detection and mitigation. However, this integration faces challenges due to the complexity of IoT-edge devices, which are marked by numerous interconnected devices, and the dynamic nature of SDN architectures. IDS solutions must be precise in threat detection while also being resource-efficient. Our research evaluates ML-based IDS based on CPU usage, CPU load, and energy consumption, especially under real-time cyber threats. These metrics are + +vital to ensure that ML-based IDS are effective in protecting networks against external threats and sustainable in their operation. They help maintain a crucial balance between security and performance in the complex ecosystems of IoT devices and IoT devices integrated with SDN. Additionally, to ensure the transparency and reproducibility of our study, we have provided detailed information about the experimental setup and made our testbed and results publicly available for further research [77]. By adopting these measures, we have attempted to provide robust validation and increase the inability to reject our findings among practitioners and researchers. + +# 9. Conclusion + +This paper presents a comparative analysis of the ML-based IDS in IoT-edge devices and IoT-edge devices integrated with SDN under different cyberattack scenarios, resulting in comprehension. In IoT systems, conventional ML models (e.g., KNN and DT) often experience increased CPU load and CPU usage, especially when subjected to DoS and DDoS cyber threats. This suggests that these models have limits in resource-limited situations. In contrast, DL-based IDS (e.g., CNN and LSTM) exhibit reduced CPU usage, indicating improved efficiency and compatibility with IoT security. 
A consistent energy consumption pattern was identified across attack types in both scenarios, encompassing advanced neural networks and conventional methods. The consistent energy efficiency of these models, independent of their computing complexity, highlights their efficacy and long-term viability for use in different network environments. The findings emphasize the significance of choosing ML-based IDS according to their computational efficiency and energy consumption to achieve optimal performance in networks with limited resources. It is imperative to thoroughly evaluate the scalability and robustness of ML-based IDS in future research, especially in more significant and more complex network environments. This assessment will explain their ability to adjust to changing cyber threats. Furthermore, it is crucial to evaluate the influence of new technologies, e.g., 5G and edge computing, on the efficacy + +and suitability of ML-based IDS in advanced network infrastructures. + +Future research directions should pivot towards optimizing ML-based IDS for enhanced scalability, real-time processing, and energy consumption. The overarching challenge is to develop effective threat detection models that minimally impact system resources. Furthermore, integrating these models into existing IoT devices and IoT devices integrated with SDN infrastructures presents additional challenges, including ensuring compatibility, scalability, and ease of maintenance. + +# A. Conflict of interest + +The authors declare that they have no known conflict of interest or personal relationships that could have appeared to influence the work reported in this paper. + +# B. Acknowledgement + +The authors thank Dr. Karim A. Emara et al. for collaborating to share the EIDM-IDS source code. + +# References + +[1] D. G. Chowdhry, R. Verma, M. Mathur, The Evolution of Business in the Cyber Age: Digital Transformation, Threats, and Security, CRC Press, 2020. +[2] B. Kaur, S. Dadkhah, F. Shoeleh, al., Internet of things (iot) security dataset evolution: Challenges and future directions, Internet of Things (2023) 100780. +[3] S. Hadzovic, S. Mrdovic, M. Radonjic, A path towards an internet of things and artificial intelligence regulatory framework, IEEE Communications Magazine (2023). +[4] K. L. M. Ang, J. K. P. Seng, E. Ngharamike, Towards crowdsourcing internet of things (crowd-iot): Architectures, security, and applications, Future Internet 14 (2) (2022) 49. +[5] M. Ahmid, O. Kazar, A comprehensive review of the internet of things security, Journal of Applied Security Research 18 (3) (2023) 289-305. +[6] P. Mall, R. Amin, A. K. Das, M. T. Leung, K.-K. R. Choo, Puf-based authentication and key agreement protocols for IoT, wsns, and smart grids: a comprehensive survey, IEEE Internet of Things Journal 9 (11) (2022) 8205-8228. +[7] A. Lakhlan, M. A. Mohammed, K. H. Abdulkareem, M. M. Jaber, J. Nedoma, R. Martinek, P. Zmij, Delay optimal schemes for internet of things applications in heterogeneous edge cloud computing networks, Sensors 22 (16) (2022) 5937. +[8] P. Malhotra, Y. Singh, P. Anand, Bangotra, al, Internet of things: Evolution, concerns and security challenges, Sensors 21 (5) (2021) 1809. +[9] A. Djenna, S. Harous, D. E. Saidouni, Internet of things meet the internet of threats: New concern cyber security issues of critical cyber infrastructure, Applied Sciences 11 (10) (2021) 4580. +[10] M. Almiani, A. AbuGhazleh, A. Al-Rahayfeh, S. Atiewi, A. 
Razaque, Deep recurrent neural network for IoT intrusion detection system, Simulation Modelling Practice and Theory 101 (2020) 102031. +[11] T. Rajmohan, P. H. Nguyen, N. Ferry, Research landscape of patterns and architectures for IoT security: a systematic review, in: 2020 46th Euromicro conference on software engineering and advanced applications (SEAA), IEEE, 2020, pp. 463-470. +[12] N. Tekin, A. Acar, A. Aris, A. S. Uluagac, V. C. Gungor, Energy consumption of on-device machine learning models for IoT intrusion detection, Internet of Things 21 (2023) 100670. + +[13] A. Hakiri, A. Gokhale, P. Berthou, D. C. Schmidt, T. Gayraud, Software-defined networking: Challenges and research opportunities for future internet, Computer Networks 75 (2014) 453-471. +[14] K. H. K. Reddy, A. K. Luhach, V. V. Kumar, S. Pratihar, D. Kumar, D. S. Roy, Towards energy efficient smart city services: A software defined resource management scheme for data centers, Sustainable Computing: Informatics and Systems 35 (2022) 100776. +[15] A. Montazerolghaem, Software-defined internet of multimedia things: Energy-efficient and load-balanced resource management, IEEE Internet of Things Journal 9 (3) (2021) 2432-2442. +[16] J. Liu, H. Shen, H. S. Narman, W. Chung, Z. Lin, A survey of mobile crowdsensing techniques: A critical component for the internet of things, ACM Transactions on Cyber-Physical Systems 2 (3) (2018) 1-26. +[17] B. B. Gupta, M. Quamara, An overview of internet of things (iot): Architectural aspects, challenges, and protocols, Concurrency and Computation: Practice and Experience 32 (21) (2020) e4946. +[18] A. A. Alsulami, Q. A. Al-Haija, A. Tayeb, Anomaly-based intrusion detection system for IoT networks with improved data engineering (2022). +[19] I. Mukherjee, N. K. Sahu, S. K. Sahana, Simulation and modeling for anomaly detection in IoT network using machine learning, International Journal of Wireless Information Networks 30 (2) (2023) 173-189. +[20] O. Elnakib, E. Shaaban, M. Mahmoud, K. Emara, Eidm: deep learning model for IoT intrusion detection systems, The Journal of Supercomputing (2023) 1-21. +[21] M. Douiba, S. Benkirane, A. Guezzzaz, M. Azrour, An improved anomaly detection model for IoT security using decision tree and gradient boosting, The Journal of Supercomputing 79 (3) (2023) 3392-3411. +[22] S. M. Kasongo, Y. Sun, A deep learning method with wrapper-based feature extraction for wireless intrusion detection system, Computers & Security 92 (2020) 101752. +[23] A. Verma, V. Ranga, Machine learning-based intrusion detection systems for IoT applications, Wireless Personal Communications 111 (2020) 2287-2310. +[24] Y. Otoum, D. Liu, A. Nayak, Dl-ids: a deep learning-based intrusion detection framework for securing IoT, Transactions on Emerging Telecommunications Technologies 33 (3) (2022) e3803. +[25] T. Gaber, A. El-Ghamry, A. E. Hassanien, Injection attack detection using machine learning for smart IoT applications, Physical Communication 52 (2022) 101685. +[26] U. Sachdeva, P. R. Vamsi, Analysis of deep learning models for anomaly detection in time series IoT sensor data, in: Proceedings of the 2022 Fourteenth International Conference on Contemporary Computing, 2022, pp. 54-62. +[27] K. Nimmy, M. Dilraj, S. Sankaran, K. Achuthan, Leveraging power consumption for anomaly detection on IoT devices in smart homes, Journal of Ambient Intelligence and Humanized Computing (2022) 1-12. +[28] R. Chaganti, W. Suliman, V. Ravi, A. 
Dua, Deep learning approach for sdn-enabled intrusion detection system in IoT networks, Information 14 (1) (2023) 41. +[29] M. M. Isa, L. Mhamdi, Hybrid deep autoencoder with random forest in native sdn intrusion detection environment, in: ICC 2022-IEEE International Conference on Communications, IEEE, 2022, pp. 1698-1703. +[30] P. T. Duy, H. Do Hoang, N. H. Khoa, V.-H. Pham, et al., Fool your enemies: Enable cyber deception and moving target defense for intrusion detection in sdn, in: 2022 21st International Symposium on Communications and Information Technologies (ISCIT), IEEE, 2022, pp. 27-32. +[31] M. A. Bouke, A. Abdullah, S. H. ALshatebi, M. T. Abdullah, E2ids: An enhanced intelligent intrusion detection system based on decision tree algorithm, Journal of Applied Artificial Intelligence 3 (1) (2022) 1-16. + +[32] L. A. C. Ahakonye, C. I. Nwakanma, J.-M. Lee, D.-S. Kim, Scada intrusion detection scheme exploiting the fusion of modified decision tree and chi-square feature selection, Internet of Things 21 (2023) 100676. +[33] M. Hammad, N. Hewahi, W. Elmedany, Mmm-rf: A novel high accuracy multinomial mixture model for network intrusion detection systems, Computers & Security 120 (2022) 102777. +[34] K. Albulayhi, Q. Abu Al-Haija, S. A. Alsuhibany, A. A. Jillepalli, M. Ashrafuzzaman, F. T. Sheldon, Iot intrusion detection using machine learning with a novel high performing feature selection method, Applied Sciences 12 (10) (2022) 5015. +[35] H. Yang, S. Liang, J. Ni, H. Li, X. S. Shen, Secure and efficient km classification for industrial internet of things, IEEE Internet of Things Journal 7 (11) (2020) 10945-10954. +[36] A. D. Afifaturahman, M. Firmansyah, Perbandingan algorithm k-nearest neighbour (knn) dan naive bayes pada intrusion detection system (ids), Innovation in Research of Informatics (INNOVATICs) 3 (1) (2021). +[37] F. Z. Belgrana, N. Benamrane, M. A. Hamaida, A. M. Chaabani, A. Taleb-Ahmed, Network intrusion detection system using neural network and condensed nearest neighbors with selection of nsl-kdd influencing features, in: 2020 IEEE International Conference on Internet of Things and Intelligence System (IoTaIS), IEEE, 2021, pp. 23-29. +[38] Y. Yan, L. Qi, J. Wang, Y. Lin, L. Chen, A network intrusion detection method based on stacked autoencoder and LSTM, in: ICC 2020-2020 IEEE International Conference on Communications (ICC), IEEE, 2020, pp. 1-6. +[39] M. D. Hossain, H. Inoue, H. Ochiai, D. Fall, Y. Kadobayashi, Lstmbased intrusion detection system for in-vehicle can bus communications, IEEE Access 8 (2020) 185489-185502. +[40] A. El-Ghamry, A. Darwish, A. E. Hassanien, An optimized cnn-based intrusion detection system for reducing risks in smart farming, Internet of Things 22 (2023) 100709. +[41] S. Jamshidi, A. Nikanjam, M. A. Hamdaqa, F. Khomh, Attack detection by using deep learning for cyber-physical system, in: Artificial Intelligence for Cyber-Physical Systems Hardening, Springer, 2022, pp. 155–179. +[42] P. Sun, P. Liu, Q. Li, C. Liu, X. Lu, R. Hao, J. Chen, Dl-ids: Extracting features using cnn-lstm hybrid network for intrusion detection system, Security and communication networks 2020 (2020) 1–11. +[43] A. Halbouni, T. S. Gunawan, M. H. Habaebi, M. Halbouni, M. Kartiwi, R. Ahmad, Cnn-lstm: hybrid deep neural network for network intrusion detection system, IEEE Access 10 (2022) 99837-99849. +[44] D. Stiawan, M. Y. B. Idris, A. M. Bamhdi, R. 
Budiarto, et al., Cicids-2017 dataset feature analysis with information gain for anomaly detection, IEEE Access 8 (2020) 132911–132921. +[45] R. Panigrahi, S. Borah, A detailed analysis of cicids2017 dataset for designing intrusion detection systems, International Journal of Engineering & Technology 7 (3.24) (2018) 479-482. +[46] A. A. Alsulami, Q. Abu Al-Haija, A. Tayeb, A. Alqahtani, An intrusion detection and classification system for IoT traffic with improved data engineering, Applied Sciences 12 (23) (2022) 12336. +[47] L. Yang, A. Moubayed, I. Hamieh, A. Shami, Tree-based intelligent intrusion detection system in internet of vehicles, in: 2019 IEEE global communications conference (GLOBECOM), IEEE, 2019, pp. 1-6. +[48] Great Learning, Label encoding in python, [link], accessed: 2024-03-21 (n.d.). +[49] Analytics Vidhya, Overcoming class imbalance using smote techniques, [link], accessed: 2024-03-21 (2020). +[50] T. N. Sainath, O. Vinyals, A. Senior, H. Sak, Convolutional, long short-term memory, fully connected deep neural networks, in: 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), IEEE, 2015, pp. 4580-4584. +[51] L. Muhammad, A. A. Haruna, U. S. Sharif, M. B. Mohammed, Cnn-lstm deep learning based forecasting model for Covid-19 infection cases in nigeria, south africa and botswana, Health and technology 12 (6) (2022) 1259–1276. + +[52] L. Alzubaidi, J. Zhang, A. J. Humaidi, A. Al-Dujaili, Y. Duan, O. Al-Shamma, J. Santamaría, M. A. Fadhel, M. Al-Amidie, L. Farhan, Review of deep learning: concepts, cnn architectures, challenges, applications, future directions, Journal of big Data 8 (2021) 1-74. +[53] G. Najera-Gutierrez, J. A. Ansari, Web Penetration Testing with Kali Linux: Explore the methods and tools of ethical hacking with Kali Linux, Packt Publishing Ltd, 2018. +[54] S. Asadollahi, B. Goswami, M. Sameer, Ryu controller's scalability experiment on software defined networks, in: 2018 IEEE international conference on current trends in advanced computing (ICCTAC), IEEE, 2018, pp. 1-5. +[55] K. Kaur, J. Singh, N. S. Ghumman, Mininet as software defined networking testing platform, in: International conference on communication, computing & systems (ICCCS), 2014, pp. 139-42. +[56] L. St, S. Wold, et al., Analysis of variance (anova), Chemometrics and intelligent laboratory systems 6 (4) (1989) 259-272. +[57] D. Breitenbacher, I. Homoliak, Y. L. Aung, N. O. Tippenhauer, Y. Elovici, Hades-iot: A practical host-based anomaly detection system for iot devices, in: Proceedings of the 2019 ACM Asia conference on computer and communications security, 2019, pp. 479-484. +[58] B. Chen, Y. Zhang, G. Iosifidis, M. Liu, Reinforcement learning on computational resource allocation of cloud-based wireless networks, in: 2020 IEEE 6th World Forum on Internet of Things (WF-IoT), IEEE, 2020, pp. 1-6. +[59] R. D. Corin, A. Costanzo, F. Callegati, D. Siracusa, Methods and techniques for dynamic deployability of software-defined security services, CoRR (2020). +[60] A. van de Ven, Powertop, [link]. +[61] N. F. Syed, Z. Baig, A. Ibrahim, C. Valli, Denial of service attack detection through machine learning for the IoT, Journal of Information and Telecommunication 4 (4) (2020) 482-503. +[62] K. Sonar, H. Upadhyay, A survey: Ddos attack on internet of things, International Journal of Engineering Research and Development 10 (11) (2014) 58-63. +[63] M. M. Raikar, S. 
Meena, Ssh brute force attack mitigation in internet of things (iot) network: An edge device security measure, in: 2021 2nd international conference on secure cyber computing and communications (ICSCCC), IEEE, 2021, pp. 72-77. +[64] Q. A. Al-Haija, E. Saleh, M. Alnabhan, Detecting port scan attacks using logistic regression, in: 2021 4th International symposium on advanced electrical and communication technologies (ISAECT), IEEE, 2021, pp. 1-5. +[65] Z. Campbell, A. Bray, A. Ritz, A. Groce, Differentially private anova testing, in: 2018 1st International Conference on Data Intelligence and Security (ICDIS), IEEE, 2018, pp. 281-285. +[66] H. Wei, X. Song, Smooth tests for normality in anova, arXiv preprint arXiv:2110.04849 (2021). +[67] E. Frimpong, A performance study of the snort ids (2008). +[68] D. Fadhilah, M. I. Marzuki, Performance analysis of ids snort and ids suricata with many-core processor in virtual machines against dos/ddos attacks, in: 2020 2nd International Conference on Broadband Communications, Wireless Sensors and Powering (BCWSP), IEEE, 2020, pp. 157-162. +[69] M. Hawedi, C. Talhi, H. Boucheneb, Multi-tenant intrusion detection system for public cloud (mtids), The Journal of Supercomputing 74 (2018) 5199–5230. +[70] S. M. Raza, J. Jeong, M. Kim, B. Kang, H. Choo, Empirical performance and energy consumption evaluation of container solutions on resource constrained IoT gateways, Sensors 21 (4) (2021) 1378. +[71] W. Park, S. Ahn, Performance comparison and detection analysis in snort and suricata environment, Wireless Personal Communications 94 (2017) 241-252. +[72] E. Ozturk Kiyak, B. Ghasemkhani, D. Birant, High-level k-nearest neighbors (hlknn): A supervised machine learning model for classification analysis, Electronics 12 (18) (2023) 3828. +[73] E. Altulaihan, M. A. Almaiah, A. Aljughaiman, Anomaly detection ids for detecting dos attacks in IoT networks based on machine learning algorithms, Sensors 24 (2) (2024) 713. + +[74] U. Nations, United nations goals: Sustainable development, [link], accessed: September 3, 2024 (2023). +[75] F. Khomh, S. A. Abtahizadeh, Understanding the impact of cloud patterns on performance and energy consumption, Journal of Systems and Software 141 (2018) 151-170. +[76] C. Wohlin, P. Runeson, M. Höst, M. C. Ohlsson, B. Regnell, A. Wesslén, Experimentation in software engineering, Springer Science & Business Media, 2012. +[77] S. Jamshidi, Replication packages, [link]. + +# Appendix + +Table 13: Abbreviations used in this research. + +
| Abbreviation | Meaning |
| --- | --- |
| AI | Artificial Intelligence |
| ANOVA | Analysis of Variance |
| ANN | Artificial Neural Network |
| BT | Boosting Tree |
| CPU | Central Processing Unit |
| DAE | Deep Autoencoder |
| DDoS | Distributed Denial-of-Service |
| DL | Deep Learning |
| DoS | Denial-of-Service |
| DT | Decision Tree |
| GPU | Graphics Processing Unit |
| IDS | Intrusion Detection System |
| IoT | Internet of Things |
| KNN | K-Nearest Neighbor |
| LR | Logistic Regression |
| LSTM | Long Short-Term Memory |
| CNN | Convolutional Neural Network |
| MCU | Microcontroller Unit |
| MITM | Man-in-the-Middle |
| ML | Machine Learning |
| MTD | Moving Target Defense |
| NB | Naïve Bayes |
| R2L | Root to Local |
| RF | Random Forest |
| RNN | Recurrent Neural Network |
| SDN | Software-Defined Networking |
| SDPN | Stacked-Deep Polynomial Network |
| SMO | Spider Monkey Optimization |
| SMOTE | Synthetic Minority Oversampling Technique |
| SNN | Spiking Neural Network |
| SVM | Support Vector Machine |
| U2R | User to Root |
| WFEU | Wrapper Feature Extraction Unit |
| WSN | Wireless Sensor Network |
\ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09634/images/0ce2d9d2a60fcb651c812ff09c8140b466b45113ba825a0cbc96ce8abb6d1368.jpg b/data/2025/2504_09xxx/2504.09634/images/0ce2d9d2a60fcb651c812ff09c8140b466b45113ba825a0cbc96ce8abb6d1368.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54ab3d2241a7fb4f6acbfb6f5a2b0695f8953312 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/0ce2d9d2a60fcb651c812ff09c8140b466b45113ba825a0cbc96ce8abb6d1368.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d482f2169ab6e9df119a29a1b3feb8686695cb6c5aa39cec13492b738de232e7 +size 61635 diff --git a/data/2025/2504_09xxx/2504.09634/images/1af4aced79d75a548723f09e225664bbb96f6d6700d00c43968aa178dc0a5c72.jpg b/data/2025/2504_09xxx/2504.09634/images/1af4aced79d75a548723f09e225664bbb96f6d6700d00c43968aa178dc0a5c72.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a7a30c0efd5afe02ac473b5c522a10e1e50ecc6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/1af4aced79d75a548723f09e225664bbb96f6d6700d00c43968aa178dc0a5c72.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2275bad5b31f71d8638c27d52ac1a3fa178053933da021ff83047243b965a5c5 +size 31550 diff --git a/data/2025/2504_09xxx/2504.09634/images/251f99d34e8d7e9454bfd3edc048190023c2a6c8c23b168d632e267238bde84f.jpg b/data/2025/2504_09xxx/2504.09634/images/251f99d34e8d7e9454bfd3edc048190023c2a6c8c23b168d632e267238bde84f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0d65e92ca7c5671f48d448f9e05a26eb2501475a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/251f99d34e8d7e9454bfd3edc048190023c2a6c8c23b168d632e267238bde84f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19dd1369014050af77f23d432d97a20de76a2cf2bd21d74891079bcd73036000 +size 30734 diff --git a/data/2025/2504_09xxx/2504.09634/images/2a90fde578b9f825ce59035599dea091ebd2cf5ed5be137d79b307d09e9cfb2d.jpg b/data/2025/2504_09xxx/2504.09634/images/2a90fde578b9f825ce59035599dea091ebd2cf5ed5be137d79b307d09e9cfb2d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe4354d502f6ecd3ccb32ec74885530898674b0d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/2a90fde578b9f825ce59035599dea091ebd2cf5ed5be137d79b307d09e9cfb2d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e2fdd0d388d5b77f7bbaf5c0f5b5d5f7e99c47fd907516561db8e2e7b8fcaac8 +size 64874 diff --git a/data/2025/2504_09xxx/2504.09634/images/2bdb3a6d783df896b48d0f412798353d9e3d3b880f7d4365d283f017702a4c35.jpg b/data/2025/2504_09xxx/2504.09634/images/2bdb3a6d783df896b48d0f412798353d9e3d3b880f7d4365d283f017702a4c35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4985e9c82d98aab316fa2832e40682b72bf2470 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/2bdb3a6d783df896b48d0f412798353d9e3d3b880f7d4365d283f017702a4c35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2394c75c53aa5d52fb52c02ce55c43528eb56352f5f234a10c9d9d65bba19ebc +size 30229 diff --git a/data/2025/2504_09xxx/2504.09634/images/3352c2d6279b3c18c32a15035d2fad226977bad5a82e283f7e78eb8d92736391.jpg b/data/2025/2504_09xxx/2504.09634/images/3352c2d6279b3c18c32a15035d2fad226977bad5a82e283f7e78eb8d92736391.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2553420d24e58d502080a9f32b315d99cdb7bb15 --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09634/images/3352c2d6279b3c18c32a15035d2fad226977bad5a82e283f7e78eb8d92736391.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c769427387cdccdb733574bf20c8fef0eb62612f085bea16cdfc4047cce434b4 +size 33230 diff --git a/data/2025/2504_09xxx/2504.09634/images/3438938ed0f1adb9e28527aebc27ca8fcbe55857e4020f1d3c8eb9e1b3a85be0.jpg b/data/2025/2504_09xxx/2504.09634/images/3438938ed0f1adb9e28527aebc27ca8fcbe55857e4020f1d3c8eb9e1b3a85be0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27775d5537534ce55d7c5b1e46b7729c9054ff7b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/3438938ed0f1adb9e28527aebc27ca8fcbe55857e4020f1d3c8eb9e1b3a85be0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:45b987c37439e48b72770976a88a200d42730778a9284b15b798f245b4af491f +size 29569 diff --git a/data/2025/2504_09xxx/2504.09634/images/3b4e80fa10eb1e2f9e4d8be48c124d93e93944e9df202e0ad45add7539d26c8e.jpg b/data/2025/2504_09xxx/2504.09634/images/3b4e80fa10eb1e2f9e4d8be48c124d93e93944e9df202e0ad45add7539d26c8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5b219ca683af33aa239181071401b346dc199eab --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/3b4e80fa10eb1e2f9e4d8be48c124d93e93944e9df202e0ad45add7539d26c8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:01812e6c21e27b509d3e5a6e2fae082299dfd873263853a35ade219bd6216359 +size 36751 diff --git a/data/2025/2504_09xxx/2504.09634/images/57cb6a1aa51f0f83e9611c049e5334b014beab92ec3cd0b15411af446b8dc0ef.jpg b/data/2025/2504_09xxx/2504.09634/images/57cb6a1aa51f0f83e9611c049e5334b014beab92ec3cd0b15411af446b8dc0ef.jpg new file mode 100644 index 0000000000000000000000000000000000000000..76a49adba1711467d8d7794a3e3774f2a38f4bee --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/57cb6a1aa51f0f83e9611c049e5334b014beab92ec3cd0b15411af446b8dc0ef.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:701723c80d3b0039662cd6e59598bc70599896e9df04de2583eb4961cc6ebb1c +size 32791 diff --git a/data/2025/2504_09xxx/2504.09634/images/58dc9bf0185249aca304b6bf51cdc56444b2584dad26b32bd6bb1216bfa9dc43.jpg b/data/2025/2504_09xxx/2504.09634/images/58dc9bf0185249aca304b6bf51cdc56444b2584dad26b32bd6bb1216bfa9dc43.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8b81ab887f246261943da4b248f2c8a9927705d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/58dc9bf0185249aca304b6bf51cdc56444b2584dad26b32bd6bb1216bfa9dc43.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:001e60b77f5e3c2930c5618a2ea2164025af2a6e9c36fb5fd553dbe3fc9f3dbb +size 37999 diff --git a/data/2025/2504_09xxx/2504.09634/images/663548692e8e7e2de3dfdeda62c7ea5cdf8682492fac6d42168c814f8765d763.jpg b/data/2025/2504_09xxx/2504.09634/images/663548692e8e7e2de3dfdeda62c7ea5cdf8682492fac6d42168c814f8765d763.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ac019bdb705177b7cbcc3b01bc37b483ed8e6ab8 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/663548692e8e7e2de3dfdeda62c7ea5cdf8682492fac6d42168c814f8765d763.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5f1b80db3d6e83c7bf0d03f3e3b0dc5e5f534c120372acbf6238b37618b6feb4 +size 30504 diff --git a/data/2025/2504_09xxx/2504.09634/images/69d318352c9083ca1c5ac983dc98578e751733e24bc8f5dcb0a365155d70ef36.jpg 
b/data/2025/2504_09xxx/2504.09634/images/69d318352c9083ca1c5ac983dc98578e751733e24bc8f5dcb0a365155d70ef36.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aca94ade4b7fd0604cf4ed0f350b4247c062f6f4 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/69d318352c9083ca1c5ac983dc98578e751733e24bc8f5dcb0a365155d70ef36.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a6a273d08312900fa23eca7a1aa0acf3ff55f78f6c276ecce2f20cf567326606 +size 107372 diff --git a/data/2025/2504_09xxx/2504.09634/images/76798f9d0ff7469f92dc1968687a668bbe01b8942db9c98e301c11f254bb5a84.jpg b/data/2025/2504_09xxx/2504.09634/images/76798f9d0ff7469f92dc1968687a668bbe01b8942db9c98e301c11f254bb5a84.jpg new file mode 100644 index 0000000000000000000000000000000000000000..702d51e0e02486eeaad9e27d1e2ffa88f0109fbc --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/76798f9d0ff7469f92dc1968687a668bbe01b8942db9c98e301c11f254bb5a84.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:560d245d813ececf44e823d822c15f433b6106e41d92e16fc26d9a7f81d83331 +size 30548 diff --git a/data/2025/2504_09xxx/2504.09634/images/80b826b2da3aaae86deb3b263a1de847b08d3842d3eb3476def4d337e1348324.jpg b/data/2025/2504_09xxx/2504.09634/images/80b826b2da3aaae86deb3b263a1de847b08d3842d3eb3476def4d337e1348324.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e141eac606f24ef83cc7f45d4d52f3b1ab0624ae --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/80b826b2da3aaae86deb3b263a1de847b08d3842d3eb3476def4d337e1348324.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73efe3da1da5845d8e271563c38bb0790ab3c37cff8e126649d7da00f1660a42 +size 137125 diff --git a/data/2025/2504_09xxx/2504.09634/images/8790c0ec8c8e4948e20afe7e459c53de3ac9970a2ea4575d17aa9e6a0701d68f.jpg b/data/2025/2504_09xxx/2504.09634/images/8790c0ec8c8e4948e20afe7e459c53de3ac9970a2ea4575d17aa9e6a0701d68f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7a3b069be5658173f293005eb339092cc05b9f3a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/8790c0ec8c8e4948e20afe7e459c53de3ac9970a2ea4575d17aa9e6a0701d68f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a9b9d0537265974e0ce97def2f6aba7a2d872bce07b11da8392f7a97a0e3ea98 +size 31327 diff --git a/data/2025/2504_09xxx/2504.09634/images/a433945cb1829f61259f6ba9148719bf834e1a76b8a5e34e20d6d88a8c9f6a32.jpg b/data/2025/2504_09xxx/2504.09634/images/a433945cb1829f61259f6ba9148719bf834e1a76b8a5e34e20d6d88a8c9f6a32.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79dacd7e271f40c1ad80daf6890f0ce0a586f12e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/a433945cb1829f61259f6ba9148719bf834e1a76b8a5e34e20d6d88a8c9f6a32.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2bd7480c4b36c7bd8c5ddd4e4128a8bea513ab1a362531c0979bb01ecf79b9e1 +size 32038 diff --git a/data/2025/2504_09xxx/2504.09634/images/a5849353a126f0c5cd9481a994454c5ef073b83de215a78893cca68077303183.jpg b/data/2025/2504_09xxx/2504.09634/images/a5849353a126f0c5cd9481a994454c5ef073b83de215a78893cca68077303183.jpg new file mode 100644 index 0000000000000000000000000000000000000000..151d428db9e194ef58b643822a3b0eb3bd8220aa --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/a5849353a126f0c5cd9481a994454c5ef073b83de215a78893cca68077303183.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:c457701d4e16634dfce821911290d91ccf23dfeae71b24d1f89d6405f8932987 +size 32521 diff --git a/data/2025/2504_09xxx/2504.09634/images/a8c052e149131e14ee8d791a6e088133751de0ae0ea0575bd6291fcfd479610c.jpg b/data/2025/2504_09xxx/2504.09634/images/a8c052e149131e14ee8d791a6e088133751de0ae0ea0575bd6291fcfd479610c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9b1cb7ddfa7e717cfbd95b4b549771776345b910 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/a8c052e149131e14ee8d791a6e088133751de0ae0ea0575bd6291fcfd479610c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ec1aacb88df2ce9e536cf3cc6934f8a71b70bc1ed755a5ed1a9024ab0d4e6173 +size 26906 diff --git a/data/2025/2504_09xxx/2504.09634/images/a9fbe4523e7a90197894b5f92e5e5c2fa314185d12790a490d4434c61aff811f.jpg b/data/2025/2504_09xxx/2504.09634/images/a9fbe4523e7a90197894b5f92e5e5c2fa314185d12790a490d4434c61aff811f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0b1401d2625834286b13a0ea8e17071a2b4e4399 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/a9fbe4523e7a90197894b5f92e5e5c2fa314185d12790a490d4434c61aff811f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:72aed6586f45bfd215bf56003085ff8f4bd601b982c0debf051994eca8c5ac68 +size 34009 diff --git a/data/2025/2504_09xxx/2504.09634/images/de6163c30a2befa354ff42e749225c46579ebb595981253974955912f27f9118.jpg b/data/2025/2504_09xxx/2504.09634/images/de6163c30a2befa354ff42e749225c46579ebb595981253974955912f27f9118.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ef3d0e675859cae3b621028a4e0cd81c4da82323 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/de6163c30a2befa354ff42e749225c46579ebb595981253974955912f27f9118.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ed2b73f78e64b7f8105680a2cfd91ecae90bc7a9ef3126b1c6136e7c0b2e5502 +size 211571 diff --git a/data/2025/2504_09xxx/2504.09634/images/ebfdbe0d011113feaa528a9d5edb54922ef1df36ccfb3ae94161ccec55847281.jpg b/data/2025/2504_09xxx/2504.09634/images/ebfdbe0d011113feaa528a9d5edb54922ef1df36ccfb3ae94161ccec55847281.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9d6543236cb1d0ad1d05cff5fdc9ee39ad9bb9f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/images/ebfdbe0d011113feaa528a9d5edb54922ef1df36ccfb3ae94161ccec55847281.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:963e7f25b609750bced1d83dbe1d59127dc6aee949cc83d9ba3bd327a3da354c +size 36013 diff --git a/data/2025/2504_09xxx/2504.09634/layout.json b/data/2025/2504_09xxx/2504.09634/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..e4d8aae926f91545ab5e2314d368233ff86c11ca --- /dev/null +++ b/data/2025/2504_09xxx/2504.09634/layout.json @@ -0,0 +1,13229 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 48, + 50, + 525, + 89 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 50, + 525, + 89 + ], + "spans": [ + { + "bbox": [ + 48, + 50, + 525, + 89 + ], + "type": "text", + "content": "Evaluating Machine Learning-Driven Intrusion Detection Systems in IoT: Performance and Energy Consumption" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 48, + 99, + 384, + 113 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 99, + 384, + 113 + ], + "spans": [ + { + "bbox": [ + 48, + 99, + 384, + 113 + ], + "type": "text", + "content": "Saeid Jamshidi, Kawser Wazed Nafi, Amin Nikanjam, Foutse 
Khomh" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 123, + 242, + 134 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 123, + 242, + 134 + ], + "spans": [ + { + "bbox": [ + 48, + 123, + 242, + 134 + ], + "type": "text", + "content": "SWAT, Polytechnique, Montréal, H3T 1J4, Quebec, Canada" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 149, + 134, + 160 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 149, + 134, + 160 + ], + "spans": [ + { + "bbox": [ + 48, + 149, + 134, + 160 + ], + "type": "text", + "content": "ARTICLEINFO" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 169, + 85, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 169, + 85, + 179 + ], + "spans": [ + { + "bbox": [ + 48, + 169, + 85, + 179 + ], + "type": "text", + "content": "Keywords:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 179, + 175, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 179, + 175, + 216 + ], + "spans": [ + { + "bbox": [ + 48, + 179, + 175, + 216 + ], + "type": "text", + "content": "Machine Learning, Intrusion Detection System, Energy Consumption, Software-Defined Networking, SDN-IoT" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 221, + 150, + 287, + 160 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 150, + 287, + 160 + ], + "spans": [ + { + "bbox": [ + 221, + 150, + 287, + 160 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 220, + 169, + 545, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 169, + 545, + 245 + ], + "spans": [ + { + "bbox": [ + 220, + 169, + 545, + 245 + ], + "type": "text", + "content": "In the landscape of network security, the integration of Machine Learning (ML)-based Intrusion Detection System (IDS) represents a significant leap forward, especially in the domain of the Internet of Things (IoT) and Software-Defined Networking (SDN). Such ML-based IDS are crucial for improving security infrastructures, and their importance is increasingly pronounced in IoT systems. However, despite the rapid advancement of ML-based IDS, there remains a gap in understanding their impact on critical performance metrics (e.g., CPU load, energy consumption, and CPU usage) in resource-constrained IoT devices. This becomes especially crucial in scenarios involving real-time cyber threats that challenge IoT devices in a public/private network." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 220, + 245, + 545, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 220, + 245, + 545, + 368 + ], + "spans": [ + { + "bbox": [ + 220, + 245, + 545, + 368 + ], + "type": "text", + "content": "To address this gap, this article presents an empirical study that evaluates the impact of state-of-the-art ML-based IDSs on performance metrics such as CPU usage, energy consumption, and CPU load in the absence and presence of real-time cyber threats, with a specific focus on their deployment at the edge of IoT infrastructures. We also incorporate SDN to evaluate the comparative performance of ML-based IDSs with and without SDN. To do so, we focus on the impact of both SDN's centralized control and dynamic resource management on the performance metrics of an IoT system. Finally, we analyze our findings using statistical analysis using the Analysis of Variance (ANOVA) analysis. 
Our findings demonstrate that traditional ML-based IDS, when implemented at the edge gateway with and without SDN architecture, significantly affects performance metrics against cyber threats compared to DL-based ones. Also, we observed substantial increases in energy consumption, CPU usage, and CPU load during real-time cyber threat scenarios at the edge, underscoring the resource-intensive nature of these systems. This research fills the existing knowledge void and delivers essential insights into the operational dynamics of ML-based IDS at edge gateway in IoT systems." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 389, + 131, + 402 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 389, + 131, + 402 + ], + "spans": [ + { + "bbox": [ + 48, + 389, + 131, + 402 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 406, + 289, + 549 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 406, + 289, + 549 + ], + "spans": [ + { + "bbox": [ + 48, + 406, + 289, + 549 + ], + "type": "text", + "content": "The rapid expansion of the Internet of Things (IoT) has ushered in an era where data flows seamlessly across various sectors, driving profound changes in how devices interact [1][2]. This intricate IoT ecosystem, composed of countless devices, sensors, and intelligent nodes, has fundamentally reshaped how we think about device communication, significantly minimizing the need for human involvement [3]. The integration of Software-Defined Networking (SDN) within the IoT landscape represents a significant step forward, creating a unified IoT-SDN framework that offers centralized control, improved network management, and stronger security measures [4][5]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 48, + 549, + 289, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 549, + 289, + 655 + ], + "spans": [ + { + "bbox": [ + 48, + 549, + 289, + 655 + ], + "type": "text", + "content": "The rapid expansion of IoT, driven by the interconnection of millions of devices via Wireless Sensor Networks (WSNs), presents significant challenges [6]. These challenges stem mainly from these devices' limited memory, power, and battery life, highlighting the need for optimized computing and advanced data analysis techniques [7]. Deploying SDN within this framework aims to overcome these obstacles by offering a streamlined, secure network infrastructure that facilitates effective resource allocation and enhanced threat" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 389, + 361, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 389, + 361, + 399 + ], + "spans": [ + { + "bbox": [ + 303, + 389, + 361, + 399 + ], + "type": "text", + "content": "management." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 399, + 545, + 482 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 399, + 545, + 482 + ], + "spans": [ + { + "bbox": [ + 302, + 399, + 545, + 482 + ], + "type": "text", + "content": "Given the widespread security vulnerabilities in IoT networks, such as service disruptions and unauthorized access, the importance of Machine Learning (ML)-based Intrusion Detection Systems (IDS) has grown [8]. ML-based IDS are crucial for protecting network integrity due to their ability to adapt dynamically and effectively identify threats [9][10] [11]." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 482, + 545, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 482, + 545, + 625 + ], + "spans": [ + { + "bbox": [ + 302, + 482, + 545, + 625 + ], + "type": "text", + "content": "However, despite advancements in developing ML-based IDS for IoT, several critical gaps remain, as highlighted by Tekin et al. [12]. While previous research has examined ML-based IDS's performance in controlled, static testbed environments, there is a significant gap in understanding how these systems operate under the dynamic conditions of real-time cyber threats, especially when IoT is integrated with SDN. Moreover, while the potential of SDN to significantly enhance resource management in IoT systems is widely acknowledged [13][14][15], there is a lack of empirical evidence on how SDN interacts with ML-based IDS during cyber threats." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 626, + 545, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 626, + 545, + 733 + ], + "spans": [ + { + "bbox": [ + 302, + 626, + 545, + 733 + ], + "type": "text", + "content": "In this study, we set two primary objectives designed to deepen our understanding of network performance metrics in IoT. Firstly, we assess the impact of deploying ML-based IDS at edge gateway, mainly focusing on ML-based IDS performance metrics under real-time cyber threats. Secondly, we explore the impact of integrating SDN with our testbed, again at edge gateway, to evaluate its influence on performance metrics under similar cyber threats. The rationale behind incorporating SDN into our testbed is its" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 64, + 660, + 140, + 670 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 660, + 140, + 670 + ], + "spans": [ + { + "bbox": [ + 64, + 660, + 140, + 670 + ], + "type": "text", + "content": "*Corresponding author" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 63, + 671, + 168, + 681 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 671, + 168, + 681 + ], + "spans": [ + { + "bbox": [ + 63, + 671, + 168, + 681 + ], + "type": "text", + "content": "**Principal corresponding author" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 682, + 201, + 693 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 682, + 201, + 693 + ], + "spans": [ + { + "bbox": [ + 67, + 682, + 201, + 693 + ], + "type": "text", + "content": "jamshidi.saeid@polymt1.ca," + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 50, + 693, + 189, + 701 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 693, + 189, + 701 + ], + "spans": [ + { + "bbox": [ + 50, + 693, + 189, + 701 + ], + "type": "text", + "content": "kawser.wazed-nafi@polymtl.ca," + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 49, + 703, + 285, + 720 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 703, + 285, + 720 + ], + "spans": [ + { + "bbox": [ + 49, + 703, + 285, + 720 + ], + "type": "text", + "content": "amin.nikanjam@polymt1.ca, foutse.khomh@polymt1.ca (S.J.K.W.N.A.N.F. 
Khomh)" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 721, + 99, + 730 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 721, + 99, + 730 + ], + "spans": [ + { + "bbox": [ + 67, + 721, + 99, + 730 + ], + "type": "text", + "content": "ORcld(s):" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 48, + 756, + 247, + 767 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 756, + 247, + 767 + ], + "spans": [ + { + "bbox": [ + 48, + 756, + 247, + 767 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 492, + 755, + 543, + 767 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 755, + 543, + 767 + ], + "spans": [ + { + "bbox": [ + 492, + 755, + 543, + 767 + ], + "type": "text", + "content": "Page 1 of 21" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 49, + 55, + 289, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 55, + 289, + 197 + ], + "spans": [ + { + "bbox": [ + 49, + 55, + 289, + 197 + ], + "type": "text", + "content": "potential to improve resource management in IoT systems significantly [16][17]. We conduct a comparative analysis of the performance of seven state-of-the-art ML-based IDSs in two distinct setups: firstly, at the edge gateway, and secondly, in a similar setup augmented with SDN integration at the edge gateway, all under real-time cyber threats. This analysis is designed to elucidate the impact of SDN on performance metrics and resource management in IoT systems, especially highlighting how SDN integration can optimize the operational efficiency and resilience of IoT networks against the backdrop of evolving cyber threats. To summarize, this paper makes the following contributions:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 206, + 290, + 519 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 67, + 206, + 290, + 302 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 206, + 290, + 302 + ], + "spans": [ + { + "bbox": [ + 67, + 206, + 290, + 302 + ], + "type": "text", + "content": "- Assessing performance metrics of ML-based IDS in IoT systems under real-time cyber threats: Our investigation revealed the significant impact of seven ML-based IDS on the performance at the edge, specifically measuring CPU usage, CPU load, and energy consumption amidst cyber threats. Utilizing ANOVA, we clarify the operational consequences of deploying these sophisticated IDSs on the edge." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 309, + 290, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 309, + 290, + 392 + ], + "spans": [ + { + "bbox": [ + 67, + 309, + 290, + 392 + ], + "type": "text", + "content": "- Evaluating the impact of ML-based IDS at edge integrated with SDN: we evaluated the performance metrics of seven ML-based IDS at the edge gateway system integrated with SDN. Utilizing ANOVA, we clarify the impact of the integrated SDN with IoT on deploying these sophisticated IDS under real-time cyber threats." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 401, + 290, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 401, + 290, + 519 + ], + "spans": [ + { + "bbox": [ + 67, + 401, + 290, + 519 + ], + "type": "text", + "content": "- Proposing a plugin-based ML-based IDS test suite: This test suite comes with a group of available datasets and available ML-based IDSs and allows the users to define their own IoT and SDN applications and test their ML-based IDSs and models in terms of detection accuracy and performance metrics. Researchers can efficiently perform comparative analyses for their algorithms and models with other available algorithms and models. The test suite is publicly available (section 8) for researchers and practitioners to reuse." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 49, + 528, + 289, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 528, + 289, + 622 + ], + "spans": [ + { + "bbox": [ + 49, + 528, + 289, + 622 + ], + "type": "text", + "content": "The remainder of this paper is organized as follows: Section 2 discusses the review of our research literature. Section 3 discusses the necessary background knowledge. In Section 4, we describe the experimental design, the Research Questions (RQs), and the metrics of the experiments. Section 5 explains our results and findings. Section 8 discusses threats to the validity of our study. Finally, Section 9 concludes the paper and outlines future work." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 640, + 141, + 653 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 640, + 141, + 653 + ], + "spans": [ + { + "bbox": [ + 50, + 640, + 141, + 653 + ], + "type": "text", + "content": "2. Related Works" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 658, + 289, + 741 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 658, + 289, + 741 + ], + "spans": [ + { + "bbox": [ + 50, + 658, + 289, + 741 + ], + "type": "text", + "content": "Understanding the performance trade-offs of ML-based IDS in IoT, especially in resource-constrained edge gateways, remains an open challenge. While numerous studies, as mentioned in the previous section, have focused on detection accuracy, limited research has analyzed their real-time computational impact. In particular, there is a significant gap in understanding how ML-based IDS operate under real-time" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 55, + 544, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 55, + 544, + 103 + ], + "spans": [ + { + "bbox": [ + 305, + 55, + 544, + 103 + ], + "type": "text", + "content": "cyber threats, especially when integrated with SDN. This section reviews prior works on ML-based IDS in IoT and SDN, examining their strengths and limitations and focusing on ML models and energy consumption concerns." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 113, + 440, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 113, + 440, + 124 + ], + "spans": [ + { + "bbox": [ + 305, + 113, + 440, + 124 + ], + "type": "text", + "content": "2.1. 
IoT Intrusion Detection" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 126, + 544, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 126, + 544, + 267 + ], + "spans": [ + { + "bbox": [ + 305, + 126, + 544, + 267 + ], + "type": "text", + "content": "Alsulami et al. [18] proposed a new ML model to identify and categorize network activity in IoT systems. Their research aimed to classify network traffic into distinct categories, including normal behavior and various types of attacks (e.g., Mirai, Denial-of-Service (DoS), Scan, and Man-in-the-Middle (MITM)). The study involved testing several supervised learning models on the customized IoTID20 dataset, including Spiking Neural Networks (SNNs), DT, Boosting Trees (BT), Support Vector Machines (SVM), and KNN. These models, enhanced through deep feature engineering, effectively identified and classified network anomalies." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 268, + 544, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 268, + 544, + 435 + ], + "spans": [ + { + "bbox": [ + 305, + 268, + 544, + 435 + ], + "type": "text", + "content": "Mukherjee et al. [19] conducted an in-depth investigation into the predictive capabilities of supervised learning models (e.g., Logistic Regression (LR), Naïve Bayes (NB), DT, RF, and Artificial Neural Network (ANN)) for anomaly detection. Their study utilized a dataset comprising 350,000 data points. The research compared these models against established state-of-the-art techniques, including BIRCH clustering and K-Means, and evaluated their performance in different scenarios. This included an analysis using the complete dataset and a separate evaluation after removing binary data points in the 'value' feature. The models demonstrated high precision in both scenarios, underscoring their efficacy in practical anomaly forecasting and enhancing security measures against potential risks." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 436, + 544, + 566 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 436, + 544, + 566 + ], + "spans": [ + { + "bbox": [ + 305, + 436, + 544, + 566 + ], + "type": "text", + "content": "Elnakib et al. [20] proposed the Enhanced Intrusion Detection Deep Learning Multi-class Classification Model (EIDM), a sophisticated Deep Learning (DL) model designed to enhance security in the IoT context. This model is adept at accurately categorizing 15 distinct traffic characteristics, encompassing a range of 14 discrete attack types. The performance of EIDM was evaluated against four other contemporary models, focusing on classification accuracy and efficiency. The increased precision of EIDM highlights its promise as a powerful solution for safeguarding IoT networks against a wide range of attacks." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 566, + 544, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 566, + 544, + 685 + ], + "spans": [ + { + "bbox": [ + 305, + 566, + 544, + 685 + ], + "type": "text", + "content": "Douiba et al. [21] proposed an innovative IDS to enhance IoT device security. Their approach utilized gradient boosting and DT in the Catboost framework. The model's performance was rigorously assessed on several datasets, including NSL-KDD, IoT-23, BoT-IoT, and Edge-IIoT, with optimization achieved through GPU acceleration. 
The IDS distinguished itself with its ability to detect anomalies in real-time and its computing efficiency, demonstrating high accuracy, recall, and precision metrics, around " + }, + { + "bbox": [ + 305, + 566, + 544, + 685 + ], + "type": "inline_equation", + "content": "99.9\\%" + }, + { + "bbox": [ + 305, + 566, + 544, + 685 + ], + "type": "text", + "content": " on a record detection and computation time." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 686, + 544, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 686, + 544, + 733 + ], + "spans": [ + { + "bbox": [ + 305, + 686, + 544, + 733 + ], + "type": "text", + "content": "Kasongo et al. [22] presented a research endeavor in which they proposed a Feed-Forward Deep Neural Network (FFDNN) IDS, enhanced by the inclusion of a Wrapper Feature Extraction Unit (WFEU) utilizing the Extra Trees" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "type": "text", + "content": "Page 2 of 21" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 49, + 54, + 289, + 162 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 54, + 289, + 162 + ], + "spans": [ + { + "bbox": [ + 49, + 54, + 289, + 162 + ], + "type": "text", + "content": "algorithm. The WFEU-FFDNN was evaluated for its performance on several datasets, including UNSW-NB15 and AWID, and compared with traditional ML methods. The system demonstrated high classification accuracies in binary and multiclass classifications across these datasets, significantly outperforming in scenarios involving the AWID dataset. The enhanced precision of the WFEU-FFDNN model emphasizes its efficacy in real-time anomaly detection and computing efficiency." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 163, + 289, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 163, + 289, + 281 + ], + "spans": [ + { + "bbox": [ + 49, + 163, + 289, + 281 + ], + "type": "text", + "content": "In addition to all of the works stated above, Verma et al. [23] examined ML algorithms in the context of augmenting security measures in the IoT. The researchers compared classifiers using benchmark datasets (e.g., CIDDS-001, UNSW-NB15, and NSL-KDD). This analysis was supported by statistical tests, namely the Friedman and Nemenyi tests. The researchers also evaluated the reaction times on the Raspberry Pi platform, showcasing the adaptability and efficiency of the classifiers in IoT scenarios, hence emphasizing their practical relevance." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 282, + 289, + 435 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 282, + 289, + 435 + ], + "spans": [ + { + "bbox": [ + 49, + 282, + 289, + 435 + ], + "type": "text", + "content": "Otoum et al. [24] presented a scholarly investigation in which they propose a DL-powered intrusion detection system (DL-based IDS) to effectively address challenges associated with feature learning and dataset management. The DL-based IDS developed by the researchers integrates the Spider Monkey Optimization(SMO) algorithm with the stacked-deep polynomial network (SDPN) to enhance threat identification. The system can detect various abnormalities, including DoS, User to Root attacks (U2R), probing, and Root-to-local attacks (R2L). The DL-based IDS was evaluated using the NSL-KDD dataset and exhibited outstanding performance metrics, showcasing its efficacy in various aspects of threat detection." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 436, + 289, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 436, + 289, + 555 + ], + "spans": [ + { + "bbox": [ + 49, + 436, + 289, + 555 + ], + "type": "text", + "content": "Gaber et al. [25] highlight securing IoT systems, especially in complex environments ( e.g., smart cities). The authors introduced a feature selection methodology that combines constant removal and recursive feature elimination strategies. They utilized a DT classifier with a subset of 8 characteristics, assessed on the AWID dataset using various ML classifiers. In contrast to existing methods, their approach exhibited exceptional performance, achieving high accuracy, precision, and F1 score rates. These results underscore the potential of their methodology in the domain of IoT-IDS." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 556, + 289, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 556, + 289, + 685 + ], + "spans": [ + { + "bbox": [ + 49, + 556, + 289, + 685 + ], + "type": "text", + "content": "Sachdeva et al. [26] investigate the issue of fortifying cybersecurity in IoT networks to mitigate the impact of distributed denial-of-service (DDoS) attacks. The authors put out an innovative approach for data pre-processing, which involves the integration of ML and DL classifiers. The class imbalances in the BOT-IoT and TON-IoT datasets from UNSW Australia are mitigated using several Synthetic Minority Oversampling Technique (SMOTE) variants. The hybrid methodology employed in this study, which integrates many algorithms, demonstrates the promising prospects for efficient detection of DDoS attacks in IoT networks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 699, + 288, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 699, + 288, + 733 + ], + "spans": [ + { + "bbox": [ + 50, + 699, + 288, + 733 + ], + "type": "text", + "content": "The related works discussed above show that the most ML-based IDS developed and re-used by researchers are DT, KNN, RF, LSTM, CNN, and a hybrid model of CNN and" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 55, + 543, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 55, + 543, + 103 + ], + "spans": [ + { + "bbox": [ + 305, + 55, + 543, + 103 + ], + "type": "text", + "content": "LSTM. In addition, EIDM is the most recent work that has overcome the limitations of the previous ML models. 
That is why we proceed with all these six ML-based IDS to carry out our study in this paper." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 112, + 458, + 124 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 112, + 458, + 124 + ], + "spans": [ + { + "bbox": [ + 305, + 112, + 458, + 124 + ], + "type": "text", + "content": "2.2. Energy consumption in IDS" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 125, + 544, + 362 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 125, + 544, + 362 + ], + "spans": [ + { + "bbox": [ + 305, + 125, + 544, + 362 + ], + "type": "text", + "content": "Only a tiny amount of research has been done so far to determine the energy consumption in IDS. Among them, Tekin et al. [12] investigated the topic of IDS in the context of the IoT, with a specific focus on the energy consumption aspect in devices with limitations. The authors assessed various ML paradigms in the context of cloud computing, edge computing, and IoT devices. They specifically emphasize the promising capabilities of TinyML for microcontroller units (MCUs). DT algorithm demonstrates in terms of training, inference, and power efficiency. Although Naive Bayes (NB) has superior training speed, it exhibits a minor accuracy trade-off requirements of the KNN algorithm increase proportionally with the quantity of the dataset, hence diminishing its suitability for deployment in IoT systems. Both DT and RF exhibit low power consumption and high accuracy. However, it is essential to consider that RF's longer execution time represents a trade-off. The research findings also elucidate the advantages and constraints of cloud-based ML, underscoring the significance of algorithm choice in practical implementations." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 363, + 543, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 363, + 543, + 505 + ], + "spans": [ + { + "bbox": [ + 305, + 363, + 543, + 505 + ], + "type": "text", + "content": "Nimmy et al. [27] utilize the energy consumption patterns of IoT devices to identify irregularities in smart home environments. They developed a prototype of a smart camera based on Raspberry Pi to gather power traces during regular operations and simulated DDoS attacks. This approach emphasizes the importance of energy consumption as a crucial indicator of aberrant behaviors. The deep feedforward neural network used in their study demon- strates exceptional performance in identifying anomalies, as evidenced by rigorous evaluations of ML models. This indicates its potential to enhance the security of smart homes significantly." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 516, + 477, + 526 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 516, + 477, + 526 + ], + "spans": [ + { + "bbox": [ + 305, + 516, + 477, + 526 + ], + "type": "text", + "content": "2.3. IoT Intrusion Detection in SDN" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 528, + 544, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 528, + 544, + 716 + ], + "spans": [ + { + "bbox": [ + 305, + 528, + 544, + 716 + ], + "type": "text", + "content": "Chaganti et al. [28] present a sophisticated IDS for IoT networks. This system leverages SDN and specifically emphasizes the utilization of DL techniques. 
The research is for its utilization of LSTM networks, a Recurrent Neural Network (RNN) type renowned for its efficacy in handling time series data, which is critical in detecting network threats. The authors' principal contribution is utilizing an LSTM model, which they employ to discern network attacks. To evaluate the efficacy of their approach, the authors conduct a comparative analysis with alternative architectures(e.g., SVM). The experimental findings present solid evidence that highlights the improved efficacy of the LSTM model in accurately categorizing various network attacks. The LSTM model demonstrated exceptional accuracy and efficiency in detecting attack patterns, surpassing conventional ML models in precision and recall metrics." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 718, + 543, + 740 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 718, + 543, + 740 + ], + "spans": [ + { + "bbox": [ + 305, + 718, + 543, + 740 + ], + "type": "text", + "content": "M. M. Isa et al. [29] present the DAERF model in their research, an innovative IDS for SDN. This model combines" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "type": "text", + "content": "Page 3 of 21" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 49, + 55, + 288, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 55, + 288, + 197 + ], + "spans": [ + { + "bbox": [ + 49, + 55, + 288, + 197 + ], + "type": "text", + "content": "a Deep Autoencoder (DAE) with an RF algorithm, creating a unique approach. The DAE excels in feature extraction and data dimensionality reduction. At the same time, the RF approach, known for using an ensemble of DTs, shows significant accuracy and robustness in classification tasks. The DAERF model was evaluated in a simulated SDN using commonly used datasets, demonstrating a high efficacy level. The integration of DL and ML in the DAERF model represents a novel approach that effectively identifies and categorizes network intrusions, enhancing the security of SDN systems and ensuring their capability to handle real-time applications with scalability and adaptability." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 199, + 288, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 199, + 288, + 399 + ], + "spans": [ + { + "bbox": [ + 49, + 199, + 288, + 399 + ], + "type": "text", + "content": "Phan The Duy et al. [30] presented 'FoolYE,' an innovative IDS designed specifically for SDN systems. 
The system combines cyber deception techniques with Moving Target Defense (MTD) methodologies. The core of this methodology lies in its ability to create a dynamic and misleading network environment, making it challenging for malicious actors to identify and exploit genuine resources. A key innovation is deep transfer learning-based IDS, which employs advanced DL models (e.g., ResNet50 and DenseNet161), originally designed for image recognition. These models have been adapted using deep transfer learning techniques to analyze network traffic for ML-based IDS, demonstrating the versatility and efficacy of DL in cybersecurity. The study involved experiments in simulated SDN systems, where the performance of the IDS was thoroughly examined, showing its high capability in accurately detecting a wide range of network intrusions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 401, + 288, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 401, + 288, + 578 + ], + "spans": [ + { + "bbox": [ + 49, + 401, + 288, + 578 + ], + "type": "text", + "content": "Despite advancements in ML-based IDS for IoT, a significant gap remains in understanding their real-time computational impact, especially in energy consumption, CPU load, and CPU usage at the edge gateway. This gap is further compounded by the lack of empirical studies evaluating the effectiveness and efficiency of ML-based IDS in real-world, resource-constrained edge gateway, especially when integrated with SDN during cyber threats. To address these shortcomings, our study provides a comprehensive empirical analysis of ML-based IDS, focusing on their performance trade-offs in SDN-enabled and non-SDN edge gateways. Specifically, we assess how different ML-based IDS models impact system resources under real-time cyber threats, offering critical insights into their feasibility for deployment in IoT networks." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 593, + 129, + 606 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 593, + 129, + 606 + ], + "spans": [ + { + "bbox": [ + 50, + 593, + 129, + 606 + ], + "type": "text", + "content": "3. Background" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 610, + 287, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 610, + 287, + 632 + ], + "spans": [ + { + "bbox": [ + 50, + 610, + 287, + 632 + ], + "type": "text", + "content": "This section dives into the underlying premise of the research's baselines." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 634, + 288, + 741 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 634, + 288, + 741 + ], + "spans": [ + { + "bbox": [ + 49, + 634, + 288, + 741 + ], + "type": "text", + "content": "Decision Tree (DT): In the field of IDS, DT is a key ML method for analyzing network data. They use trees, e.g., models, to break down network features into binary decisions, evaluating network attributes at each node to identify effective splits. This creates a rule-based hierarchy that excels at spotting differences between normal and suspicious network activities. 
DTs are valued for their clarity and ease of interpretation, playing a vital in improving cybersecurity by identifying unusual or unauthorized actions" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 55, + 343, + 65 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 55, + 343, + 65 + ], + "spans": [ + { + "bbox": [ + 305, + 55, + 343, + 65 + ], + "type": "text", + "content": "[31] [32]." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 67, + 544, + 173 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 67, + 544, + 173 + ], + "spans": [ + { + "bbox": [ + 305, + 67, + 544, + 173 + ], + "type": "text", + "content": "Random Forest (RF): The algorithm is highly valued in IDS for its precision in classifying network data. Utilizing RF, an ML algorithm, it creates a group of DT to assess various network attributes, effectively distinguishing between normal and malicious activities. RF excels in managing large datasets, balancing IDS data disparities, and minimizing overfitting, making IoT and network security crucial. It achieves accurate detection of unusual network behaviors [33] [34]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 175, + 544, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 175, + 544, + 448 + ], + "spans": [ + { + "bbox": [ + 305, + 175, + 544, + 448 + ], + "type": "text", + "content": "K-Nearest Neighbor (KNN): The KNN algorithm is a key IDS tool known for its effective similarity-based classification. It compares network traffic with existing labeled data using distance metrics to classify new instances, with 'k' indicating the number of neighbors considered. This method is crucial for identifying normal versus abnormal network activities, offering a simple yet versatile solution for real-time IDS. KNN excels in both binary and multiclass problems, providing quick, reliable categorizations crucial for responding to threats in dynamic networks [35] [36] [37]. Long short-term memory (LSTM): LSTM networks, a type of recurrent neural network, are highly effective in analyzing sequential data for IDS. Their unique memory cells excel at identifying complex patterns in network traffic, making them adept at spotting advanced threats that traditional methods may miss. LSTMs are especially valuable for maintaining context over data sequences, which is crucial for distinguishing between normal and malicious network activities. Their application in IDS significantly boosts cybersecurity, especially in dynamic and IoT environments, by adapting to new threats and efficiently handling varying data lengths, offering a robust solution to modern cybersecurity challenges [38] [39]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 448, + 544, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 448, + 544, + 578 + ], + "spans": [ + { + "bbox": [ + 305, + 448, + 544, + 578 + ], + "type": "text", + "content": "Convolutional Neural Network(CNN): CNNs provide a resilient DL methodology for IDS. CNNs are widely recognized for their ability to independently acquire hierarchical features from network traffic. This is achieved through convolutional, pooling, and fully connected layers, which enable the discernment of spatial patterns in the traffic data. This capacity facilitates the recognition of both well-established and new threats. 
CNN in IDS is considered crucial in enhancing cybersecurity defenses against a wide range of cyber threats due to their capacity to scale effectively and efficiently handle real-time data [40] [41]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 580, + 544, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 580, + 544, + 733 + ], + "spans": [ + { + "bbox": [ + 305, + 580, + 544, + 733 + ], + "type": "text", + "content": "Hybrid model of LSTM and CNN: The integration of LSTM and CNN models into IDS significantly boosts network security by combining the spatial analysis capabilities of CNNs with the temporal pattern recognition of LSTMs. This hybrid approach detects complex cyber threats by analyzing network traffic data in both spatial and temporal dimensions. CNNs effectively identify security breaches through local pattern recognition, while LSTMs track the sequence of network events over time, offering a detailed understanding of potential threats. This fusion results in more accurate and efficient detection of sophisticated, multistage attacks, reducing false positives and adapting to new threats, thereby enhancing overall anomaly detection and" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "type": "text", + "content": "Page 4 of 21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 49, + 55, + 289, + 78 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 55, + 289, + 78 + ], + "spans": [ + { + "bbox": [ + 49, + 55, + 289, + 78 + ], + "type": "text", + "content": "maintaining network integrity without excessive alerts [42] [43]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 79, + 289, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 79, + 289, + 245 + ], + "spans": [ + { + "bbox": [ + 49, + 79, + 289, + 245 + ], + "type": "text", + "content": "EIDM: The EIDM is a cutting-edge IDS approach expertly handling a wide range of network events. Its design combines convolutional and dense layers to tackle the challenges of class diversity and data imbalance. The model begins with a 120-node dense layer, followed by an 80-neuron convolutional layer with a kernel size of 20 to better distinguish between similar network activities. It also features a Maxpooling layer for enhanced feature extraction and a dropout layer to avoid overfitting. EIDM can classify 15 network behaviors through six dense layers, using 'relu' activation and SGD and Adam optimizers for optimal accuracy and efficiency. 
According to [20], EIDM's unique structure and optimization techniques make it a standout solution for improving network IDS." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 263, + 131, + 277 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 263, + 131, + 277 + ], + "spans": [ + { + "bbox": [ + 50, + 263, + 131, + 277 + ], + "type": "text", + "content": "4. Study design" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 50, + 280, + 289, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 280, + 289, + 351 + ], + "spans": [ + { + "bbox": [ + 50, + 280, + 289, + 351 + ], + "type": "text", + "content": "This section describes our methodology to evaluate the impact of specific ML-based IDSs using selected performance metrics. We first mention our Research Questions (RQs), followed by an explanation of the experimental design and the metrics used to evaluate the impact of the ML-based IDS." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 362, + 190, + 375 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 362, + 190, + 375 + ], + "spans": [ + { + "bbox": [ + 50, + 362, + 190, + 375 + ], + "type": "text", + "content": "4.1. Research questions(RQs)" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 65, + 375, + 258, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 375, + 258, + 386 + ], + "spans": [ + { + "bbox": [ + 65, + 375, + 258, + 386 + ], + "type": "text", + "content": "Our research aims to address the following RQs:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 394, + 289, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 394, + 289, + 441 + ], + "spans": [ + { + "bbox": [ + 67, + 394, + 289, + 441 + ], + "type": "text", + "content": "- RQ1: How do ML-based IDSs impact CPU usage, CPU load, and energy consumption at the edge gateway without SDN during real-time cyber threats?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 73, + 442, + 289, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 442, + 289, + 525 + ], + "spans": [ + { + "bbox": [ + 73, + 442, + 289, + 525 + ], + "type": "text", + "content": "This RQ examines the impact of ML-based IDSs on crucial performance metrics, specifically CPU usage, CPU load, and energy consumption, at edge gateway not integrated with SDN. It focuses on analyzing the performance of seven state-of-the-art ML-based IDSs and their impacts on these key metrics in the face of diverse cyber threats." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 533, + 289, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 533, + 289, + 582 + ], + "spans": [ + { + "bbox": [ + 67, + 533, + 289, + 582 + ], + "type": "text", + "content": "- RQ2: What are the differences in CPU usage, CPU load, and energy consumption impacts of ML-based IDS at the edge gateway with SDN integration during real-time cyber threats?" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 73, + 582, + 289, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 582, + 289, + 641 + ], + "spans": [ + { + "bbox": [ + 73, + 582, + 289, + 641 + ], + "type": "text", + "content": "This RQ explores how ML-based IDSs influence CPU usage, CPU load, and energy consumption at the edge gateway integrated with SDN. 
It involves analyzing the impacts of various ML-based IDSs on these essential performance metrics under various cyber threats." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 50, + 651, + 110, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 651, + 110, + 662 + ], + "spans": [ + { + "bbox": [ + 50, + 651, + 110, + 662 + ], + "type": "text", + "content": "4.2. DataSet" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 50, + 665, + 289, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 665, + 289, + 735 + ], + "spans": [ + { + "bbox": [ + 50, + 665, + 289, + 735 + ], + "type": "text", + "content": "In our study, we used the CICIDS2017 data set [44], a highly regarded resource organized by the Canadian Institute for Cybersecurity. This dataset is recognized as one of the gold standards in cybersecurity research, capturing a broad spectrum of benign network activities and the latest cyberattacks [45]. CICIDS2017 is designed to simulate" + } + ] + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 310, + 81, + 536, + 264 + ], + "blocks": [ + { + "bbox": [ + 305, + 58, + 516, + 79 + ], + "lines": [ + { + "bbox": [ + 305, + 58, + 516, + 79 + ], + "spans": [ + { + "bbox": [ + 305, + 58, + 516, + 79 + ], + "type": "text", + "content": "Table 1 Distribution of labeled IoT-SDN attacks in the dataset" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 310, + 81, + 536, + 264 + ], + "lines": [ + { + "bbox": [ + 310, + 81, + 536, + 264 + ], + "spans": [ + { + "bbox": [ + 310, + 81, + 536, + 264 + ], + "type": "table", + "html": "
IoT Attack LabelsNo of labeled entries
BENIGN2271320
DoS Hulk230124
Port Scan158804
DDoS128025
DoS GoldenEye10293
FTP-Patator7935
SSH-Patator5897
DoS slowloris5796
DoS Slowhttptest5499
Bot1956
Web Attack & Brute Force1507
Web Attack & XSS652
Infiltration36
Web Attack & SQL Injection21
Heartbleed11
", + "image_path": "2a90fde578b9f825ce59035599dea091ebd2cf5ed5be137d79b307d09e9cfb2d.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 305, + 285, + 543, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 285, + 543, + 344 + ], + "spans": [ + { + "bbox": [ + 305, + 285, + 543, + 344 + ], + "type": "text", + "content": "real-world network environments, making it an essential resource for researchers to test and validate advanced IDS thoroughly. The breadth and diversity of the asset highlight its importance, making it necessary for those aiming to strengthen network security paradigms." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 305, + 355, + 418, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 355, + 418, + 366 + ], + "spans": [ + { + "bbox": [ + 305, + 355, + 418, + 366 + ], + "type": "text", + "content": "4.3. The ML-based IDS" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 305, + 367, + 544, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 367, + 544, + 569 + ], + "spans": [ + { + "bbox": [ + 305, + 367, + 544, + 569 + ], + "type": "text", + "content": "Numerous ML-based IDS have been developed by researchers [12] [22] [25] [46]. However, we had a significant challenge in reviewing these publications and selecting some for our study. Most did not make their solutions' applications or source code publicly available. This lack of transparency hinders the ability to experiment with these works in real IoT devices. This omission complicates, and may even prevent, the objective comparison of the proposed solutions. Consequently, to initiate our study, it became necessary to independently implement all ML-based IDS that have been previously utilized, except the ML-based IDS proposed by [20], which shared their code ML-based IDS available to researchers. In this section, we explore the implementation process of seven ML-based IDSs that we have developed: DT, KNN, RF, LSTM, CNN, and a hybrid model of LSTM and CNN. Table 3 presents a comparative analysis of the performance metrics of ML-based IDS." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 305, + 580, + 395, + 591 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 580, + 395, + 591 + ], + "spans": [ + { + "bbox": [ + 305, + 580, + 395, + 591 + ], + "type": "text", + "content": "4.3.1. DT, KNN, RF" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 305, + 593, + 544, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 593, + 544, + 734 + ], + "spans": [ + { + "bbox": [ + 305, + 593, + 544, + 734 + ], + "type": "text", + "content": "We have developed and deployed DT-based IDS, RF-based IDS, and KNN-based IDS [47], each specifically designed to improve security policy. The foundation of these models is a preprocessing technique applied to the selected CICIDS 2017 dataset. The dataset features various simulated cyber-attack scenarios alongside standard traffic data. It encompasses multiple numerical attributes, including but not limited to packet sizes, flow durations, and bytes per flow, which are critical for analyzing network behavior and detecting anomalies. 
We applied min-max normalization as our initial preprocessing step to ensure uniformity across these diverse numerical attributes and" + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 492, + 756, + 543, + 766 + ], + "type": "text", + "content": "Page 5 of 21" + } + ] + } + ], + "index": 21 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 105, + 83, + 487, + 298 + ], + "blocks": [ + { + "bbox": [ + 48, + 58, + 452, + 80 + ], + "lines": [ + { + "bbox": [ + 48, + 58, + 452, + 80 + ], + "spans": [ + { + "bbox": [ + 48, + 58, + 452, + 80 + ], + "type": "text", + "content": "Table 6 Comparison of structure and accuracy of different Neural Network models in IDS for IoT-SDN network" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 105, + 83, + 487, + 298 + ], + "lines": [ + { + "bbox": [ + 105, + 83, + 487, + 298 + ], + "spans": [ + { + "bbox": [ + 105, + 83, + 487, + 298 + ], + "type": "table", + "html": "
DatasetCICIDS2017CICIDS2017CICIDS2017CICIDS2017
Categories15151515
ModelLSTMLSTM+CNNCNNEIDM
Layers1011812
Parameters5638612795349748735
Structure detailsDense (64)Dense (64)Dense (120)
Dense (128)Conv1D (64, 10)Conv1D (16,30)Conv1D(80,20)
LSTM (128)Conv1D (64, 10)Conv1D (16,30)MaxPooling1D (2)
LSTM (256)MaxPooling1D (2)MaxPooling1D (2)Dense (120)
Dense (128)LSTM (128)Flatten()Dense (100)
Dense (48)LSTM (64)Dense (32)Dense (80)
Dense (15)Dense (64)Dense (15)Dense (60)
Dense (15)Dense (60)
Dense (40)
Dense (15)
Training Accuracy (%)97.72%98.77%97.92%99.57%
Testing Accuracy (%)93.86%95.75%94.74%99.56%
", + "image_path": "69d318352c9083ca1c5ac983dc98578e751733e24bc8f5dcb0a365155d70ef36.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 47, + 317, + 289, + 509 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 317, + 289, + 509 + ], + "spans": [ + { + "bbox": [ + 47, + 317, + 289, + 509 + ], + "type": "text", + "content": "mitigate scale discrepancies. Missing values were imputed to preserve the integrity of the data. The LabelEncoder[48] was utilized to convert labels into a format suitable for ML techniques. An essential aspect of our methodology is to divide the selected dataset into training and testing subsets. For the first RQ, we adopted " + }, + { + "bbox": [ + 47, + 317, + 289, + 509 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 47, + 317, + 289, + 509 + ], + "type": "text", + "content": " training and " + }, + { + "bbox": [ + 47, + 317, + 289, + 509 + ], + "type": "inline_equation", + "content": "20\\%" + }, + { + "bbox": [ + 47, + 317, + 289, + 509 + ], + "type": "text", + "content": " testing, aligning with standard practices in ML model development. This adjustment was made to accommodate the different requirements of each research phase. As shown in Table 1, the dataset has five classes (Benign, DDoS, DoS, Brute force, and Port scan) with significantly more entries than the remaining ten classes, which contain fewer samples. SMOTE [49] with auto-sampling was employed to address the class imbalance issue in the dataset. This technique effectively augmented the representation of underrepresented classes, leading to a more balanced dataset for training purposes." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 517, + 103, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 517, + 103, + 528 + ], + "spans": [ + { + "bbox": [ + 48, + 517, + 103, + 528 + ], + "type": "text", + "content": "4.3.2. CNN" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 47, + 529, + 290, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 529, + 290, + 578 + ], + "spans": [ + { + "bbox": [ + 47, + 529, + 290, + 578 + ], + "type": "text", + "content": "In our research, we deployed a CNN-based IDS tailored for our experimental testbed. The configuration details of the CNN model, including its layers, parameters, and architecture specifics, are outlined in Table 2." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 586, + 110, + 598 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 586, + 110, + 598 + ], + "spans": [ + { + "bbox": [ + 48, + 586, + 110, + 598 + ], + "type": "text", + "content": "4.3.3. LSTM" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 47, + 599, + 290, + 647 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 599, + 290, + 647 + ], + "spans": [ + { + "bbox": [ + 47, + 599, + 290, + 647 + ], + "type": "text", + "content": "In our investigation, we implemented an LSTM-based IDS specifically for our testbeds. The detailed architecture and parameters of the LSTM model, crucial for its operation in our IDS, are thoroughly presented in Table 2." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 656, + 225, + 669 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 656, + 225, + 669 + ], + "spans": [ + { + "bbox": [ + 48, + 656, + 225, + 669 + ], + "type": "text", + "content": "4.3.4. 
Hybrid model of LSTM and CNN" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 47, + 669, + 290, + 741 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 669, + 290, + 741 + ], + "spans": [ + { + "bbox": [ + 47, + 669, + 290, + 741 + ], + "type": "text", + "content": "In our exploration, we implemented a hybrid model combining LSTM and CNN architectures to create an advanced IDS tailored to our experimental setup. This architecture has already been tested in various scenarios [50][51][43]. The intricate configuration of this hybrid LSTM and CNN model, which leverages the strengths of both LSTM and CNN to enhance" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 317, + 481, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 317, + 481, + 328 + ], + "spans": [ + { + "bbox": [ + 303, + 317, + 481, + 328 + ], + "type": "text", + "content": "detection capabilities, is detailed in Table 2." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 329, + 546, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 329, + 546, + 556 + ], + "spans": [ + { + "bbox": [ + 302, + 329, + 546, + 556 + ], + "type": "text", + "content": "The goal of using the hybridization of LSTM and CNN is twofold. First, CNN can drop the non-impactful features and select only the impactful ones (feature engineering). At the same time, it helps to learn the features in a Spatial Hierarchical manner [52]. Second, from our dataset, we got 77 features. As it is unknown which features are impactful from the given features, we applied two 1-dimensional CNN layers followed by a max-pooling layer to find the impactful features by learning the 10 nearby features together (kernel size 10). This helps us to create new feature representations where the impactful ones are retained. Later, we fed these newly derived features directly to 2 LSTM layers. This step helps to learn the spatial and temporal features together, resulting in context-aware feature representations. Finally, we applied 2 Dense layers to map the feature representations generated from the previous CNN and LSTM layers into 15 classes. This process helps us learn the input features more deeply and increase the classification accuracy." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 566, + 428, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 566, + 428, + 578 + ], + "spans": [ + { + "bbox": [ + 303, + 566, + 428, + 578 + ], + "type": "text", + "content": "4.4. Experimental Design" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 578, + 545, + 734 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 578, + 545, + 734 + ], + "spans": [ + { + "bbox": [ + 302, + 578, + 545, + 734 + ], + "type": "text", + "content": "To address RQ1, we designed a testbed incorporating two Raspberry Pi 4 Model B units as edge gateways. Each unit is equipped with 8GB of RAM and a 1.5GHz 64-bit quad-core CPU, providing a realistic environment for evaluating the computational impact of ML-based IDS at the edge gateway. Our study evaluates the performance of seven ML-based IDS models: DT, KNN, RF, LSTM, CNN, EIDM, and a hybrid of LSTM and CNN model, selected for their established effectiveness in cybersecurity. 
We conducted controlled experiments in IoT-edge networks to assess these IDS models, simulating a range of cyber threats(e.g., BENIGN, DDoS, DoS, Brute force attacks, and the Port scan) using Kali Linux [53]. These experiments" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 492, + 755, + 543, + 767 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 492, + 755, + 543, + 767 + ], + "spans": [ + { + "bbox": [ + 492, + 755, + 543, + 767 + ], + "type": "text", + "content": "Page 6 of 21" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 48, + 83, + 454, + 150 + ], + "blocks": [ + { + "bbox": [ + 48, + 57, + 223, + 79 + ], + "lines": [ + { + "bbox": [ + 48, + 57, + 223, + 79 + ], + "spans": [ + { + "bbox": [ + 48, + 57, + 223, + 79 + ], + "type": "text", + "content": "Table 3 Performance Comparison of ML-based IDS" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 48, + 83, + 454, + 150 + ], + "lines": [ + { + "bbox": [ + 48, + 83, + 454, + 150 + ], + "spans": [ + { + "bbox": [ + 48, + 83, + 454, + 150 + ], + "type": "table", + "html": "
DTKNNRFLSTMLSTM+CNNCNN
Accuracy0.99850.99670.99810.93860.95750.9474
Precision0.99850.99660.99800.97710.98770.9792
Recall0.99850.99670.99810.95240.96450.9611
F1-Score0.99850.99660.99800.96460.97600.9701
", + "image_path": "58dc9bf0185249aca304b6bf51cdc56444b2584dad26b32bd6bb1216bfa9dc43.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 142, + 164, + 455, + 360 + ], + "blocks": [ + { + "bbox": [ + 142, + 164, + 455, + 360 + ], + "lines": [ + { + "bbox": [ + 142, + 164, + 455, + 360 + ], + "spans": [ + { + "bbox": [ + 142, + 164, + 455, + 360 + ], + "type": "image", + "image_path": "0ce2d9d2a60fcb651c812ff09c8140b466b45113ba825a0cbc96ce8abb6d1368.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 375, + 463, + 386 + ], + "lines": [ + { + "bbox": [ + 130, + 375, + 463, + 386 + ], + "spans": [ + { + "bbox": [ + 130, + 375, + 463, + 386 + ], + "type": "text", + "content": "Figure 1: IoT-edge testbed topology, illustrating non-SDN and SDN-enabled setups." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 412, + 288, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 412, + 288, + 448 + ], + "spans": [ + { + "bbox": [ + 49, + 412, + 288, + 448 + ], + "type": "text", + "content": "enabled us to analyze the IDS models' impact on critical performance metrics, specifically CPU usage, CPU load, and energy consumption." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 448, + 288, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 448, + 288, + 543 + ], + "spans": [ + { + "bbox": [ + 49, + 448, + 288, + 543 + ], + "type": "text", + "content": "To address RQ2, we extended our testbed by integrating the edge gateway with the Ryu controller, establishing an SDN-based environment. Ryu, an open-source Python-based SDN controller [54], provides centralized traffic management, enhancing resource allocation and security analysis. We further utilized Mininet [55] to simulate a realistic SDN infrastructure consisting of eighteen hosts, six switches, and a Ryu controller, mirroring real-world network conditions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 50, + 553, + 109, + 564 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 553, + 109, + 564 + ], + "spans": [ + { + "bbox": [ + 50, + 553, + 109, + 564 + ], + "type": "text", + "content": "4.5. Metrics" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 50, + 566, + 288, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 566, + 288, + 624 + ], + "spans": [ + { + "bbox": [ + 50, + 566, + 288, + 624 + ], + "type": "text", + "content": "We evaluated CPU usage, CPU load, and energy consumption in our test beds in the context of ML-based IDS during cyber threat scenarios. We employed the ANOVA[56] to ensure an objective assessment of the performance of various ML-based IDS." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 50, + 635, + 178, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 635, + 178, + 647 + ], + "spans": [ + { + "bbox": [ + 50, + 635, + 178, + 647 + ], + "type": "text", + "content": "4.5.1. CPU Load CPU Usage" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 50, + 647, + 288, + 742 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 647, + 288, + 742 + ], + "spans": [ + { + "bbox": [ + 50, + 647, + 288, + 742 + ], + "type": "text", + "content": "IDS, especially at the edge and SDN environments. 
CPU usage measures the percentage of the CPU's current capacity, reflecting how much processing power is dedicated to task execution. High CPU usage in an IDS can signal extensive computational demands, potentially impacting the performance of other tasks and system responsiveness, a concern in resource-limited IoT settings. Efficient IDS, especially those utilizing ML techniques, must manage CPU" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 412, + 544, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 412, + 544, + 614 + ], + "spans": [ + { + "bbox": [ + 305, + 412, + 544, + 614 + ], + "type": "text", + "content": "usage carefully to balance detection accuracy with minimal resource use. Excessive CPU usage can slow IDS's real-time network traffic processing, leading to delays or missed attack detection. On the other hand, CPU load indicates the number of processes waiting to be executed, providing an understanding of the CPU's workload. An increase in CPU load might suggest heavy network traffic or numerous attack attempts, highlighting the risk of system overload. Monitoring CPU load allows for early identification of potential bottlenecks, ensuring that IDS operations do not adversely impact system performance. In SDN-enabled IoT edge systems, adept CPU load management is vital to distribute tasks between IDS and other network efficient functions, ensuring optimal resource allocation and system performance. Both CPU usage and load are pivotal metrics for assessing IDS efficacy in environments where resources are constrained, e.g., at the edge gateway[57][58][59]." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 626, + 449, + 636 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 626, + 449, + 636 + ], + "spans": [ + { + "bbox": [ + 305, + 626, + 449, + 636 + ], + "type": "text", + "content": "4.5.2. CPU Performance Metrics" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 638, + 543, + 731 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 638, + 543, + 731 + ], + "spans": [ + { + "bbox": [ + 305, + 638, + 543, + 731 + ], + "type": "text", + "content": "To assess the computational impact of ML-based IDS, we analyze both CPU load and CPU usage, as these metrics provide complementary insights into system performance. CPU usage is typically expressed as a percentage, indicating the proportion of processing power utilized at a given moment. In contrast, CPU load is presented as a numerical value, representing the average number of active processes waiting for CPU execution over a specific time interval. 
Moreover," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 34, + 423, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 34, + 423, + 45 + ], + "spans": [ + { + "bbox": [ + 170, + 34, + 423, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 487, + 756, + 538, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 487, + 756, + 538, + 766 + ], + "spans": [ + { + "bbox": [ + 487, + 756, + 538, + 766 + ], + "type": "text", + "content": "Page 7 of 21" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 47, + 54, + 290, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 54, + 290, + 174 + ], + "spans": [ + { + "bbox": [ + 47, + 54, + 290, + 174 + ], + "type": "text", + "content": "while CPU load can be converted into a percentage, it provides a more detailed view of system stress, especially in multi-core environments. In a multi-core processor, a load value of 1.0 on a single-core system indicates full utilization. In contrast, on a quad-core system, a load of 1.0 suggests that only " + }, + { + "bbox": [ + 47, + 54, + 290, + 174 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 47, + 54, + 290, + 174 + ], + "type": "text", + "content": " of the total available processing capacity is used. This distinction is crucial when interpreting our results, as high CPU load does not always imply that the system is at risk of overutilization—it depends on the number of available processing cores and the workload distribution." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 183, + 173, + 195 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 183, + 173, + 195 + ], + "spans": [ + { + "bbox": [ + 48, + 183, + 173, + 195 + ], + "type": "text", + "content": "4.5.3. Energy Consumption" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 196, + 289, + 576 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 196, + 289, + 576 + ], + "spans": [ + { + "bbox": [ + 49, + 196, + 289, + 576 + ], + "type": "text", + "content": "Energy consumption, often measured in watt-hours or joules, quantifies the amount of energy a device or system expended during its operation. In IoT hardware, where many devices are battery-powered or operate in energy-constrained environments, efficient energy consumption is desirable and necessary. Devices (e.g., sensors, actuators) and even more complex IoT nodes must be designed to perform their tasks while consuming minimal energy, ensuring longevity, and reducing the need for frequent battery replacements or recharges. Moreover, IoT devices integrated with SDN bring a new dimension to the energy conversation; SDN centralizes network control, dynamically optimizing network resources based on real-time demands. Although this centralization offers enhanced flexibility and scalability, it also means that the network's core components must be energy efficient. 
In IoT systems, where potentially thousands or even millions of devices communicate and exchange data, even minor inefficiencies in energy consumption can accumulate, leading to significant energy drains. Integrating ML-based IDS into the edge gateway emphasizes the need to consider energy metrics critically. ML-based IDS are inherently data-intensive, requiring substantial computational resources to process large datasets for detecting and mitigating security threats. Although these systems offer invaluable security enhancements, their operation can be energy-intensive. Therefore, measuring and optimizing the energy consumption of ML-based IDS is crucial to ensure they deliver effective security measures without unduly burdening the system's energy resources. This balance is essential for maintaining the sustainability and efficiency of the edge gateway, where energy efficiency is often a key concern." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 47, + 577, + 290, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 577, + 290, + 662 + ], + "spans": [ + { + "bbox": [ + 47, + 577, + 290, + 662 + ], + "type": "text", + "content": "We employed PowerTop [60], a robust tool, to precisely gauge and examine the energy consumption in two separate testbed configurations: the edge gateway integrated with SDN and without SDN. PowerTop's sophisticated monitoring capabilities allowed us to gain insights into these testbeds' energy consumption patterns and processor activity." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 670, + 179, + 682 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 670, + 179, + 682 + ], + "spans": [ + { + "bbox": [ + 48, + 670, + 179, + 682 + ], + "type": "text", + "content": "4.5.4. Designed cyber threats" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 47, + 683, + 290, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 47, + 683, + 290, + 732 + ], + "spans": [ + { + "bbox": [ + 47, + 683, + 290, + 732 + ], + "type": "text", + "content": "For our research, we focused on analyzing DDoS, DoS, brute force attacks, and the port scan. We chose these specific types of attacks since they were already categorized in the employed dataset. These cyber threats are prevalent and" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 54, + 544, + 80 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 54, + 544, + 80 + ], + "spans": [ + { + "bbox": [ + 303, + 54, + 544, + 80 + ], + "type": "text", + "content": "pose substantial risks in the field of cybersecurity. Below, a concise summary of each is presented:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 320, + 88, + 546, + 625 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 320, + 88, + 545, + 245 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 88, + 545, + 245 + ], + "spans": [ + { + "bbox": [ + 320, + 88, + 545, + 245 + ], + "type": "text", + "content": "- A Denial-of-Service (DoS): At the edge, DoS attacks are critical cybersecurity threats that disrupt device and service operations by flooding systems with excessive requests and consuming vital resources (e.g., bandwidth, processing power, and memory). This overload prevents the system from serving legitimate users, blocking access to essential operations. The distributed, resource-constrained nature of the edge makes them especially susceptible to DoS attacks. 
The vulnerability of these devices, coupled with their interconnectedness, means that an attack on a single device can significantly compromise the entire network's functionality and security [61]." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 320, + 251, + 545, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 251, + 545, + 418 + ], + "spans": [ + { + "bbox": [ + 320, + 251, + 545, + 418 + ], + "type": "text", + "content": "- A distributed denial-of-service (DDoS): A DDoS attack is a coordinated effort where multiple attackers from different locations flood a specific target, such as a server or network at the edge, with excessive traffic. The goal is to deplete the target's resources, causing severe service disruptions or a complete shutdown. Unlike traditional DoS attacks, which come from a single source, DDoS attacks are distributed across numerous sources, making them harder to defend against. This distributed nature makes DDoS attacks especially dangerous at the edge, where the interconnected and resource-constrained devices can exacerbate the attack's impact, potentially crippling the entire network [62]." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 320, + 426, + 546, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 426, + 546, + 546 + ], + "spans": [ + { + "bbox": [ + 320, + 426, + 546, + 546 + ], + "type": "text", + "content": "- Brute Force: A brute force attack involves an attacker systematically attempting to gain unauthorized access to a system by trying every possible combination, such as trying every key until one works. With its many interconnected devices and varying security levels, the edge is especially vulnerable to such attacks. Attackers exploit these weaknesses by repeatedly guessing passwords, encryption keys, or access codes, which seriously threatens the integrity and confidentiality of data at the edge gateway[63]." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 320, + 553, + 546, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 320, + 553, + 546, + 625 + ], + "spans": [ + { + "bbox": [ + 320, + 553, + 546, + 625 + ], + "type": "text", + "content": "- Port Scan:A port scan aims to identify a target system's open ports. By identifying open ports and the services running on them at the edge, attackers can uncover and exploit vulnerabilities, posing a serious threat to the security and integrity of the edge gateway[64]." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 634, + 540, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 634, + 540, + 658 + ], + "spans": [ + { + "bbox": [ + 302, + 634, + 540, + 658 + ], + "type": "text", + "content": "4.5.5. Analysis method for energy consumption, CPU usage, CPU load" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 658, + 545, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 658, + 545, + 743 + ], + "spans": [ + { + "bbox": [ + 302, + 658, + 545, + 743 + ], + "type": "text", + "content": "We used ANOVA to assess our observed results. ANOVA is an indispensable statistical tool for testing the null hypothesis that posits the equivalence of group means. Our study specifically employed one-way ANOVA to examine the impact of a singular independent variable on the evaluated systems. 
This method relies on several crucial assumptions, including the necessity for the data to exhibit" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 754, + 249, + 767 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 754, + 249, + 767 + ], + "spans": [ + { + "bbox": [ + 49, + 754, + 249, + 767 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 486, + 754, + 539, + 767 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 486, + 754, + 539, + 767 + ], + "spans": [ + { + "bbox": [ + 486, + 754, + 539, + 767 + ], + "type": "text", + "content": "Page 8 of 21" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 49, + 55, + 288, + 90 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 55, + 288, + 90 + ], + "spans": [ + { + "bbox": [ + 49, + 55, + 288, + 90 + ], + "type": "text", + "content": "a normal distribution, the variances between groups being equal (homogeneity of variance), and all observations being independent." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 91, + 289, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 91, + 289, + 580 + ], + "spans": [ + { + "bbox": [ + 49, + 91, + 289, + 580 + ], + "type": "text", + "content": "In addition, we conducted 15 separate tests on ML-based IDS to measure CPU load, CPU usage, and energy consumption under various cyber threats. This rigorous approach allowed us to leverage the F statistic, which quantifies the variance ratio between the means of different groups to the variance in the groups. A significant F-statistic, together with a p-value of " + }, + { + "bbox": [ + 49, + 91, + 289, + 580 + ], + "type": "inline_equation", + "content": "\\leq 0.05" + }, + { + "bbox": [ + 49, + 91, + 289, + 580 + ], + "type": "text", + "content": ", denotes statistically significant differences between group means, underscoring the efficacy of our testing methodology. By implementing this robust statistical framework, we have thoroughly evaluated the performance of various ML-based IDS models in response to different cyber threats. This analysis has allowed us to identify specific models that demonstrate resilience or efficiency against multiple attacks and require increased computational resources or energy consumption. While CPU load is a key performance metric for IDS evaluation, it is also crucial to consider its impact on IoT device availability and reliability. Excessive CPU consumption by an IDS can degrade the device's primary functions, leading to slow response times or system failures. This is especially critical in real-time applications such as healthcare, industrial automation, and smart home security, where device downtime can have serious consequences. An IDS must enhance security without inadvertently causing an attack such as a DDoS condition due to resource exhaustion. 
In addition, through these fifteen iterations of testing, ANOVA has enabled us to validate significant differences in IDS performance metrics (e.g., detection accuracy, false positive rates), CPU load, CPU usage, and energy consumption across diverse scenarios. This methodological approach provides a detailed examination of how different IDS models respond to varied threats, establishing a solid statistical foundation for assessing the efficacy of each model in a controlled environment. By distinguishing between performance differences attributable to the models' inherent capabilities and those due to random variation, our use of ANOVA has proven to be critical. It aids in identifying the most resource-efficient and reliable IDS, thereby guiding the selection process for optimal cybersecurity defenses and enhancing our management and understanding of IDS performance under cyber threat conditions [65] [66]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 50, + 589, + 117, + 601 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 589, + 117, + 601 + ], + "spans": [ + { + "bbox": [ + 50, + 589, + 117, + 601 + ], + "type": "text", + "content": "4.6. TestSuite" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 602, + 289, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 602, + 289, + 733 + ], + "spans": [ + { + "bbox": [ + 49, + 602, + 289, + 733 + ], + "type": "text", + "content": "To initiate the research work presented in this paper and to facilitate the environment for further research and testing, we introduce a versatile test suite designed to experiment with and evaluate ML-based IDS in SDN environments. Unlike conventional experimental testbeds, our test suite is an extensible framework equipped with predefined APIs and a selection of pre-integrated algorithms, facilitating the seamless integration and testing of novel IDS models. Another good contribution to our test suite is that users can execute their experiments on it without Raspberry Pi or any other hardware support. As discussed in the previous" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 305, + 55, + 544, + 280 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 55, + 544, + 280 + ], + "spans": [ + { + "bbox": [ + 305, + 55, + 544, + 280 + ], + "type": "text", + "content": "paragraph, the test suite is developed following the plug-in architecture feature. This ensures that the user can easily integrate their algorithm into the test suite and test the accuracy, energy consumption, and CPU usage with or without security threats. Users can create their own IoT-SDN network and complexity in the network and generate any number of security breaching attacks. This approach not only simplifies the validation process of IDS models in a realistic network scenario but also encourages the exploration of innovative IDS methodologies by providing a solid foundation of tools and benchmarks. We have made the test suite available with the same configuration discussed in Section 4.4. We integrated the same tools for creating an IoT-SDN network, generating security attacks, and measuring IDS accuracy, energy consumption, CPU usage, etc. Through its design, the test suite aims to advance the development and thorough evaluation of cutting-edge IDS solutions, significantly enhancing network security in the era of SDN." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 305, + 297, + 498, + 310 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 297, + 498, + 310 + ], + "spans": [ + { + "bbox": [ + 305, + 297, + 498, + 310 + ], + "type": "text", + "content": "5. Experimental Results and Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 314, + 544, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 314, + 544, + 384 + ], + "spans": [ + { + "bbox": [ + 305, + 314, + 544, + 384 + ], + "type": "text", + "content": "This section discusses our experimental results and findings. After presenting our results, we conducted an in-depth statistical analysis using ANOVA. This analysis aims to illuminate the implications and insights that emerge from the experimental results, providing an understanding of the efficacy and nuances of each IDS under study." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 305, + 396, + 468, + 418 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 396, + 468, + 418 + ], + "spans": [ + { + "bbox": [ + 305, + 396, + 468, + 418 + ], + "type": "text", + "content": "5.1. Experimental finding for RQ1 CPU Load:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 420, + 544, + 680 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 420, + 544, + 680 + ], + "spans": [ + { + "bbox": [ + 305, + 420, + 544, + 680 + ], + "type": "text", + "content": "We tested ML-based IDSs under various cyberattack scenarios to assess their impact and strain on our testbed. The types of cyberattacks we considered include DDoS, DoS, brute force attacks, and the port scan. Moreover, we conducted the ANOVA focusing on CPU load variations in our testbed. Figure 2 illustrates a comparative analysis of the average CPU load among different ML-based IDS models in the presence of various types of cyberattacks. The DL-based IDS (CNN, LSTM, combined model of LSTM and CNN, and EIDM) consistently maintain lower CPU loads across all attack types, demonstrating their efficiency in resource utilization during inference. In contrast, traditional ML-based IDS such as KNN, DT, and RF exhibit significantly higher CPU loads, especially under brute force and DDoS attacks, with KNN and DT being the most resource-intensive. This is because DL models, such as CNN and LSTM, efficiently handle computations in parallel and are optimized for inference. In contrast, traditional models (e.g., KNN and DT) require more repeated, resource-heavy calculations, such as distance computations in KNN or recursive splitting in DTs, especially under large-scale attacks." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 682, + 391, + 693 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 682, + 391, + 693 + ], + "spans": [ + { + "bbox": [ + 305, + 682, + 391, + 693 + ], + "type": "text", + "content": "Statistical Findings:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 694, + 543, + 741 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 694, + 543, + 741 + ], + "spans": [ + { + "bbox": [ + 305, + 694, + 543, + 741 + ], + "type": "text", + "content": "We conducted an ANOVA, and the results presented in Table 4 illuminate significant differences in CPU load among diverse ML-based IDS under DDoS, underscored by F-statistic of 60.40 and a p-value " + }, + { + "bbox": [ + 305, + 694, + 543, + 741 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 305, + 694, + 543, + 741 + ], + "type": "text", + "content": ". This F-statistic delineates" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 170, + 34, + 423, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 34, + 423, + 45 + ], + "spans": [ + { + "bbox": [ + 170, + 34, + 423, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 487, + 756, + 538, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 487, + 756, + 538, + 766 + ], + "spans": [ + { + "bbox": [ + 487, + 756, + 538, + 766 + ], + "type": "text", + "content": "Page 9 of 21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 54, + 448, + 260 + ], + "blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 126, + 54, + 448, + 260 + ], + "lines": [ + { + "bbox": [ + 126, + 54, + 448, + 260 + ], + "spans": [ + { + "bbox": [ + 126, + 54, + 448, + 260 + ], + "type": "image", + "image_path": "2bdb3a6d783df896b48d0f412798353d9e3d3b880f7d4365d283f017702a4c35.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 280, + 440, + 293 + ], + "lines": [ + { + "bbox": [ + 153, + 280, + 440, + 293 + ], + "spans": [ + { + "bbox": [ + 153, + 280, + 440, + 293 + ], + "type": "text", + "content": "Figure 2: The Average CPU load of ML-based IDS under cyber threats." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 85, + 337, + 508, + 394 + ], + "blocks": [ + { + "bbox": [ + 48, + 313, + 285, + 335 + ], + "lines": [ + { + "bbox": [ + 48, + 313, + 285, + 335 + ], + "spans": [ + { + "bbox": [ + 48, + 313, + 285, + 335 + ], + "type": "text", + "content": "Table 4 ANOVA results: CPU Load for ML-based IDS under DDoS." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 337, + 508, + 394 + ], + "lines": [ + { + "bbox": [ + 85, + 337, + 508, + 394 + ], + "spans": [ + { + "bbox": [ + 85, + 337, + 508, + 394 + ], + "type": "table", + "html": "
<table>
<tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr>
<tr><td>Between groups</td><td>6</td><td>21609.87</td><td>3601.64</td><td>60.40</td><td>< 0.05</td></tr>
<tr><td>Within groups</td><td>91</td><td>5426.49</td><td>59.63</td><td></td><td></td></tr>
<tr><td>Total</td><td>97</td><td>27036.36</td><td>278.73</td><td></td><td></td></tr>
</table>
", + "image_path": "a5849353a126f0c5cd9481a994454c5ef073b83de215a78893cca68077303183.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 413, + 289, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 413, + 289, + 533 + ], + "spans": [ + { + "bbox": [ + 48, + 413, + 289, + 533 + ], + "type": "text", + "content": "the contrast in CPU load variance across ML-based IDSs against the variance in, highlighting a significant influence of IDS selection on CPU load. The remarkably low p-value corroborates this finding, conclusively demonstrating the substantial differences in CPU load among the IDSs. Furthermore, we observed similar p-values " + }, + { + "bbox": [ + 48, + 413, + 289, + 533 + ], + "type": "inline_equation", + "content": "(< 0.05)" + }, + { + "bbox": [ + 48, + 413, + 289, + 533 + ], + "type": "text", + "content": " across other attacks, including brute force, DoS, and the port scan, so we do not report them. This reinforces the presence of marked differences in CPU load among diverse ML-based IDS under different cyber threats." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 65, + 541, + 98, + 554 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 541, + 98, + 554 + ], + "spans": [ + { + "bbox": [ + 65, + 541, + 98, + 554 + ], + "type": "text", + "content": "Finding" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 63, + 563, + 274, + 730 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 563, + 274, + 730 + ], + "spans": [ + { + "bbox": [ + 63, + 563, + 274, + 730 + ], + "type": "text", + "content": "DL-based IDS, such as CNN, LSTM, and hybrids, perform more efficiently in managing computational demands across diverse types of cyber threats than traditional ML-based IDS, such as KNN, DT, and RF, as they exhibit higher CPU loads at the edge. This pattern suggests that DL-based IDS' intrinsic efficiency is not attack-specific but rooted in their architecture, making them especially suited for real-time applications at edge gateway. These results are expected, as traditional ML-based IDS (e.g., KNN, DT, RF) perform computationally expensive operations during inference, unlike DL-based IDS, which optimizes processing through parallelization and learned feature extraction." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 319, + 413, + 373, + 425 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 319, + 413, + 373, + 425 + ], + "spans": [ + { + "bbox": [ + 319, + 413, + 373, + 425 + ], + "type": "text", + "content": "CPU Usage:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 426, + 545, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 426, + 545, + 592 + ], + "spans": [ + { + "bbox": [ + 302, + 426, + 545, + 592 + ], + "type": "text", + "content": "Figure 3 compares the average CPU usage of various ML-based IDS models under different cyberattacks. The KNN model consistently exhibits the highest CPU usage across all attack types, indicating its high computational demand, which limits its use in resource-constrained environments. The RF and DT models are also CPU-bound, though they are less intensive than KNN. In contrast, the LSTM model demonstrates the lowest CPU usage, making it the most efficient option for scenarios where minimizing resource consumption is critical. 
The hybrid of the LSTM and CNN model, along with the CNN and EIDM models, offer a balance between inference accuracy and computational efficiency, making them viable choices for environments with moderate resource availability." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 593, + 392, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 593, + 392, + 604 + ], + "spans": [ + { + "bbox": [ + 303, + 593, + 392, + 604 + ], + "type": "text", + "content": "Statistical Findings:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 604, + 545, + 735 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 604, + 545, + 735 + ], + "spans": [ + { + "bbox": [ + 302, + 604, + 545, + 735 + ], + "type": "text", + "content": "Table 5 presents our ANOVA results. Our results reveal significant differences in CPU load among diverse ML-based IDS under DDoS, as evidenced by a compelling F-statistic of 60.39 and a p-value " + }, + { + "bbox": [ + 302, + 604, + 545, + 735 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 302, + 604, + 545, + 735 + ], + "type": "text", + "content": ". This F-statistic highlights the variance in CPU load across IDS groups compared to the variance in, underscoring a significant impact of IDS selection on CPU load. The exceedingly small p-value further supports this conclusion. Moreover, we observed similar p-values (below 0.05) across various cyber threats, such as brute force, DoS, and the port scan, so we do not report those results." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 49, + 754, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 754, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 754, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 486, + 754, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 486, + 754, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 486, + 754, + 543, + 766 + ], + "type": "text", + "content": "Page 10 of 21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 55, + 449, + 260 + ], + "blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 126, + 55, + 449, + 260 + ], + "lines": [ + { + "bbox": [ + 126, + 55, + 449, + 260 + ], + "spans": [ + { + "bbox": [ + 126, + 55, + 449, + 260 + ], + "type": "image", + "image_path": "a433945cb1829f61259f6ba9148719bf834e1a76b8a5e34e20d6d88a8c9f6a32.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 280, + 443, + 293 + ], + "lines": [ + { + "bbox": [ + 149, + 280, + 443, + 293 + ], + "spans": [ + { + "bbox": [ + 149, + 280, + 443, + 293 + ], + "type": "text", + "content": "Figure 3: The Average CPU usage of ML-based IDS under cyber threats." 
+ } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 85, + 338, + 508, + 396 + ], + "blocks": [ + { + "bbox": [ + 48, + 313, + 288, + 335 + ], + "lines": [ + { + "bbox": [ + 48, + 313, + 288, + 335 + ], + "spans": [ + { + "bbox": [ + 48, + 313, + 288, + 335 + ], + "type": "text", + "content": "Table 5 ANOVA results: CPU Usage for ML-based IDS under DDoS." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 338, + 508, + 396 + ], + "lines": [ + { + "bbox": [ + 85, + 338, + 508, + 396 + ], + "spans": [ + { + "bbox": [ + 85, + 338, + 508, + 396 + ], + "type": "table", + "html": "
<table>
<tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr>
<tr><td>Between groups</td><td>6</td><td>21609.86</td><td>3601.64</td><td>60.39</td><td>< 0.05</td></tr>
<tr><td>Within groups</td><td>91</td><td>5426.49</td><td>59.62</td><td></td><td></td></tr>
<tr><td>Total</td><td>97</td><td>27036.36</td><td>278.73</td><td></td><td></td></tr>
</table>
", + "image_path": "3352c2d6279b3c18c32a15035d2fad226977bad5a82e283f7e78eb8d92736391.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 65, + 417, + 99, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 417, + 99, + 428 + ], + "spans": [ + { + "bbox": [ + 65, + 417, + 99, + 428 + ], + "type": "text", + "content": "Finding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 64, + 437, + 275, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 437, + 275, + 582 + ], + "spans": [ + { + "bbox": [ + 64, + 437, + 275, + 582 + ], + "type": "text", + "content": "Our analysis reveals that traditional ML-based IDS such as KNN, DT, and RF exhibit increased CPU usage under various cyber threats, thus posing challenges for the edge. Also, LSTM and other DL-based IDS exhibit lower CPU demands. This consistent efficiency across various attacks highlights the benefit of adopting DL-based IDS at the edge gateway. The increased CPU usage of KNN, DT, and RF reflects their reliance on instance-based and tree-splitting operations, which require repeated evaluations. In contrast, DL models efficiently process data in structured layers, reducing computational strain." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 63, + 595, + 158, + 606 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 595, + 158, + 606 + ], + "spans": [ + { + "bbox": [ + 63, + 595, + 158, + 606 + ], + "type": "text", + "content": "Energy consumption:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 607, + 289, + 726 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 607, + 289, + 726 + ], + "spans": [ + { + "bbox": [ + 48, + 607, + 289, + 726 + ], + "type": "text", + "content": "Figure 4 shows that the LSTM and DT models are the most energy-efficient across different types of cyberattacks, consistently exhibiting the lowest energy consumption. The CNN model also performs efficiently, with slightly higher energy usage. The LSTM, CNN model hybrid, and EIDM have moderate energy consumption, balancing complexity and efficiency. In contrast, the KNN model has the highest energy consumption across all scenarios, making it less suitable for energy-constrained environments. The RF model falls in between, with moderate energy demands." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 318, + 414, + 406, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 318, + 414, + 406, + 426 + ], + "spans": [ + { + "bbox": [ + 318, + 414, + 406, + 426 + ], + "type": "text", + "content": "Statistical Findings:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 427, + 545, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 427, + 545, + 618 + ], + "spans": [ + { + "bbox": [ + 302, + 427, + 545, + 618 + ], + "type": "text", + "content": "We conducted the ANOVA, and the results presented in Table 6 reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by F-statistic of 57.44 and a p-value of " + }, + { + "bbox": [ + 302, + 427, + 545, + 618 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 302, + 427, + 545, + 618 + ], + "type": "text", + "content": ". 
This F-statistic delineates the contrast in energy consumption variance across the group of IDSs against the variance in, highlighting a significant influence of IDS selection on energy consumption. The extremely low p-value further supports this conclusion, conclusively demonstrating the substantial differences in energy consumption among the IDSs. In addition, we observed similar p-values (" + }, + { + "bbox": [ + 302, + 427, + 545, + 618 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 302, + 427, + 545, + 618 + ], + "type": "text", + "content": ") for other cyber threats, such as brute force, DoS, and the port scan, so we do not report the results. This observation demonstrates significant differences in energy consumed among various ML-based IDS when faced with differing cyber threats." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 49, + 754, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 754, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 754, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "text", + "content": "Page 11 of 21" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 55, + 451, + 260 + ], + "blocks": [ + { + "bbox": [ + 126, + 55, + 451, + 260 + ], + "lines": [ + { + "bbox": [ + 126, + 55, + 451, + 260 + ], + "spans": [ + { + "bbox": [ + 126, + 55, + 451, + 260 + ], + "type": "image", + "image_path": "8790c0ec8c8e4948e20afe7e459c53de3ac9970a2ea4575d17aa9e6a0701d68f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 279, + 461, + 291 + ], + "lines": [ + { + "bbox": [ + 131, + 279, + 461, + 291 + ], + "spans": [ + { + "bbox": [ + 131, + 279, + 461, + 291 + ], + "type": "text", + "content": "Figure 4: The Average Energy consumption of ML-based IDS under cyber threats." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 85, + 337, + 509, + 394 + ], + "blocks": [ + { + "bbox": [ + 48, + 311, + 323, + 333 + ], + "lines": [ + { + "bbox": [ + 48, + 311, + 323, + 333 + ], + "spans": [ + { + "bbox": [ + 48, + 311, + 323, + 333 + ], + "type": "text", + "content": "Table 6 ANOVA results: energy consumption for ML-based IDS under DDoS." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 337, + 509, + 394 + ], + "lines": [ + { + "bbox": [ + 85, + 337, + 509, + 394 + ], + "spans": [ + { + "bbox": [ + 85, + 337, + 509, + 394 + ], + "type": "table", + "html": "
<table>
<tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr>
<tr><td>Between groups</td><td>6</td><td>47732.07</td><td>7955.34</td><td>57.44</td><td>< 0.05</td></tr>
<tr><td>Within groups</td><td>98</td><td>13571.72</td><td>138.48</td><td></td><td></td></tr>
<tr><td>Total</td><td>104</td><td>61303.80</td><td>589.45</td><td></td><td></td></tr>
</table>
", + "image_path": "1af4aced79d75a548723f09e225664bbb96f6d6700d00c43968aa178dc0a5c72.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 65, + 414, + 100, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 414, + 100, + 426 + ], + "spans": [ + { + "bbox": [ + 65, + 414, + 100, + 426 + ], + "type": "text", + "content": "Finding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 64, + 436, + 276, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 436, + 276, + 602 + ], + "spans": [ + { + "bbox": [ + 64, + 436, + 276, + 602 + ], + "type": "text", + "content": "Our analysis concludes a marked discrepancy in energy consumption, with traditional ML-based IDS such as KNN, RF, and DT exhibiting significantly higher energy consumption under cyber threats such as DDoS and brute force, a drawback for energy-constrained at the edge. In contrast, DL-based IDS models, LSTM, CNN, EIDM, and their hybrids excel in energy efficiency, making them the preferable choice for the edge. Traditional ML models' higher energy consumption results from their iterative computations and lack of optimized inference paths, making them less viable for real-time IoT applications where power efficiency is crucial." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 48, + 618, + 215, + 631 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 618, + 215, + 631 + ], + "spans": [ + { + "bbox": [ + 48, + 618, + 215, + 631 + ], + "type": "text", + "content": "5.2. Experimental finding for RQ2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 632, + 289, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 632, + 289, + 666 + ], + "spans": [ + { + "bbox": [ + 48, + 632, + 289, + 666 + ], + "type": "text", + "content": "This section presents our experimental results for IoT-edge devices with SDN integration during real-time cyber threats." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 666, + 100, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 666, + 100, + 677 + ], + "spans": [ + { + "bbox": [ + 48, + 666, + 100, + 677 + ], + "type": "text", + "content": "CPU Load:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 678, + 289, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 678, + 289, + 739 + ], + "spans": [ + { + "bbox": [ + 48, + 678, + 289, + 739 + ], + "type": "text", + "content": "In Figure 5, we illustrate the CPU load of various ML-based IDS models under different cyberattacks in an SDN-enabled at the edge gateway. The analysis shows that KNN and DT models have the highest CPU load, especially during DDoS and DoS, indicating significant resource demands at" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 413, + 545, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 413, + 545, + 496 + ], + "spans": [ + { + "bbox": [ + 302, + 413, + 545, + 496 + ], + "type": "text", + "content": "the edge. Conversely, the LSTM model demonstrates the lowest CPU load, highlighting its efficiency in resource management. The CNN model also performs efficiently but not as well as LSTM. The LSTM and CNN model hybrid, similar to EIDM, offers balanced performance, making them suitable for scenarios where moderate CPU efficiency is required at the edge." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 303, + 497, + 392, + 508 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 497, + 392, + 508 + ], + "spans": [ + { + "bbox": [ + 303, + 497, + 392, + 508 + ], + "type": "text", + "content": "Statistical Findings:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 509, + 545, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 509, + 545, + 663 + ], + "spans": [ + { + "bbox": [ + 302, + 509, + 545, + 663 + ], + "type": "text", + "content": "We conducted an ANOVA for the case of the DDoS attack, and the results are presented in Table 7. The results reveal significant differences in CPU load among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 142.57 and a p-value of " + }, + { + "bbox": [ + 302, + 509, + 545, + 663 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 302, + 509, + 545, + 663 + ], + "type": "text", + "content": ". This F-statistic highlights the variance in CPU load across IDSs compared to the variance in them, indicating a significant impact of IDS selection on CPU load. In addition, consistent p-values " + }, + { + "bbox": [ + 302, + 509, + 545, + 663 + ], + "type": "inline_equation", + "content": "(< 0.05)" + }, + { + "bbox": [ + 302, + 509, + 545, + 663 + ], + "type": "text", + "content": " were observed across other cyber threats, including brute force, DoS, and the port scan, and we do not report the result. This reinforces the presence of marked differences in CPU load among diverse ML-based IDS when subjected to different cyber threats." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 755, + 248, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 755, + 248, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 755, + 248, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 486, + 755, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 486, + 755, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 486, + 755, + 543, + 766 + ], + "type": "text", + "content": "Page 12 of 21" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 55, + 452, + 260 + ], + "blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 126, + 55, + 452, + 260 + ], + "lines": [ + { + "bbox": [ + 126, + 55, + 452, + 260 + ], + "spans": [ + { + "bbox": [ + 126, + 55, + 452, + 260 + ], + "type": "image", + "image_path": "a9fbe4523e7a90197894b5f92e5e5c2fa314185d12790a490d4434c61aff811f.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 153, + 277, + 
440, + 290 + ], + "lines": [ + { + "bbox": [ + 153, + 277, + 440, + 290 + ], + "spans": [ + { + "bbox": [ + 153, + 277, + 440, + 290 + ], + "type": "text", + "content": "Figure 5: The Average CPU load of ML-based IDS under cyber threats." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 85, + 333, + 508, + 390 + ], + "blocks": [ + { + "bbox": [ + 48, + 310, + 311, + 331 + ], + "lines": [ + { + "bbox": [ + 48, + 310, + 311, + 331 + ], + "spans": [ + { + "bbox": [ + 48, + 310, + 311, + 331 + ], + "type": "text", + "content": "Table 7 ANOVA results: CPU load for ML-based IDS in SDN under DDoS." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 333, + 508, + 390 + ], + "lines": [ + { + "bbox": [ + 85, + 333, + 508, + 390 + ], + "spans": [ + { + "bbox": [ + 85, + 333, + 508, + 390 + ], + "type": "table", + "html": "
<table>
<tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr>
<tr><td>Between groups</td><td>6</td><td>1184.21</td><td>197.36</td><td>142.57</td><td>< 0.05</td></tr>
<tr><td>Within groups</td><td>91</td><td>125.97</td><td>1.38</td><td></td><td></td></tr>
<tr><td>Total</td><td>97</td><td>1310.18</td><td>13.50</td><td></td><td></td></tr>
</table>
", + "image_path": "a8c052e149131e14ee8d791a6e088133751de0ae0ea0575bd6291fcfd479610c.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 64, + 411, + 99, + 424 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 411, + 99, + 424 + ], + "spans": [ + { + "bbox": [ + 64, + 411, + 99, + 424 + ], + "type": "text", + "content": "Finding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 432, + 274, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 432, + 274, + 563 + ], + "spans": [ + { + "bbox": [ + 63, + 432, + 274, + 563 + ], + "type": "text", + "content": "The findings demonstrate that traditional ML-based IDS, e.g., DT, exhibit elevated loads under DDoS and DoS. In contrast, DL-based IDSs, including EIDM, LSTM, CNN, and their hybrids, demonstrate superior energy efficiency, making them suitable for SDN-enabled at the edge gateway. The integration of SDN helps balance network resource allocation. Yet, traditional ML-based IDS still exhibit higher CPU load due to their design, reinforcing the efficiency advantage of DL-based models in dynamic network environments." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 63, + 576, + 117, + 587 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 576, + 117, + 587 + ], + "spans": [ + { + "bbox": [ + 63, + 576, + 117, + 587 + ], + "type": "text", + "content": "CPU Usage:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 588, + 289, + 684 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 588, + 289, + 684 + ], + "spans": [ + { + "bbox": [ + 48, + 588, + 289, + 684 + ], + "type": "text", + "content": "Figure 6 shows that CPU usage across various ML-based IDS models in an SDN-enabled edge gateway is fairly consistent across different attack scenarios. Only minor variations are observed, as CNN, LSTM, and hybrid versions demonstrate relatively lower CPU usage, indicating efficient resource management. The DT, KNN, and RF models also show consistent CPU usage across attacks. The EIDM model balances efficiency and performance well." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 48, + 684, + 137, + 695 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 684, + 137, + 695 + ], + "spans": [ + { + "bbox": [ + 48, + 684, + 137, + 695 + ], + "type": "text", + "content": "Statistical Findings:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 48, + 696, + 289, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 696, + 289, + 732 + ], + "spans": [ + { + "bbox": [ + 48, + 696, + 289, + 732 + ], + "type": "text", + "content": "We conducted an ANOVA for the results we got for ML-based IDS in SDN under the DDoS attack. The results presented in Table 8 reveal significant differences in CPU" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 409, + 546, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 409, + 546, + 541 + ], + "spans": [ + { + "bbox": [ + 302, + 409, + 546, + 541 + ], + "type": "text", + "content": "usage among diverse ML-based IDS under DDoS attack, underscored by an impressive F-statistic of 5.94 and a p-value of " + }, + { + "bbox": [ + 302, + 409, + 546, + 541 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 302, + 409, + 546, + 541 + ], + "type": "text", + "content": ". 
This F-statistic highlights the variance in CPU usage across the group of IDSs compared to the variance in, indicating a significant impact of IDS selection on CPU usage. In addition, we observed a consistently low p-value " + }, + { + "bbox": [ + 302, + 409, + 546, + 541 + ], + "type": "inline_equation", + "content": "(< 0.05)" + }, + { + "bbox": [ + 302, + 409, + 546, + 541 + ], + "type": "text", + "content": " for other examined cyber threats (not reported in the paper), including brute force, DoS, and port scan, reinforcing the presence of marked differences in CPU usage among diverse ML-based IDS when subjected to different cyber threats." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 486, + 755, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 486, + 755, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 486, + 755, + 543, + 766 + ], + "type": "text", + "content": "Page 13 of 21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 55, + 448, + 260 + ], + "blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 126, + 55, + 448, + 260 + ], + "lines": [ + { + "bbox": [ + 126, + 55, + 448, + 260 + ], + "spans": [ + { + "bbox": [ + 126, + 55, + 448, + 260 + ], + "type": "image", + "image_path": "3438938ed0f1adb9e28527aebc27ca8fcbe55857e4020f1d3c8eb9e1b3a85be0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 280, + 443, + 293 + ], + "lines": [ + { + "bbox": [ + 149, + 280, + 443, + 293 + ], + "spans": [ + { + "bbox": [ + 149, + 280, + 443, + 293 + ], + "type": "text", + "content": "Figure 6: The Average CPU usage of ML-based IDS under cyber threats." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 85, + 338, + 509, + 396 + ], + "blocks": [ + { + "bbox": [ + 48, + 313, + 318, + 335 + ], + "lines": [ + { + "bbox": [ + 48, + 313, + 318, + 335 + ], + "spans": [ + { + "bbox": [ + 48, + 313, + 318, + 335 + ], + "type": "text", + "content": "Table 8 ANOVA results: CPU usage for ML-based IDS in SDN under DDoS." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 338, + 509, + 396 + ], + "lines": [ + { + "bbox": [ + 85, + 338, + 509, + 396 + ], + "spans": [ + { + "bbox": [ + 85, + 338, + 509, + 396 + ], + "type": "table", + "html": "
<table>
<tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr>
<tr><td>Between groups</td><td>6</td><td>27.97</td><td>4.66</td><td>5.94</td><td>< 0.05</td></tr>
<tr><td>Within groups</td><td>91</td><td>71.32</td><td>0.78</td><td></td><td></td></tr>
<tr><td>Total</td><td>97</td><td>99.30</td><td>1.02</td><td></td><td></td></tr>
</table>
", + "image_path": "251f99d34e8d7e9454bfd3edc048190023c2a6c8c23b168d632e267238bde84f.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 65, + 417, + 100, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 65, + 417, + 100, + 428 + ], + "spans": [ + { + "bbox": [ + 65, + 417, + 100, + 428 + ], + "type": "text", + "content": "Finding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 64, + 437, + 276, + 617 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 437, + 276, + 617 + ], + "spans": [ + { + "bbox": [ + 64, + 437, + 276, + 617 + ], + "type": "text", + "content": "In the context of SDN-enhanced IoT, deploying DL-based IDS with advanced models such as CNN, LSTM, EIDM, and their hybrids demonstrates efficient energy consumption. These models achieve reduced CPU usage against brute force and port scan, benefiting from the centralized resource optimization afforded by SDN. Nonetheless, the complexity of DDoS and DoS presents a significant challenge, necessitating increased computational resources. Although SDN optimizes network operations, IDS models such as KNN and RF remain resource-intensive due to their frequent computational overhead. At the same time, DL-based IDS maintains efficiency through batch processing and learned representations." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 63, + 631, + 158, + 642 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 631, + 158, + 642 + ], + "spans": [ + { + "bbox": [ + 63, + 631, + 158, + 642 + ], + "type": "text", + "content": "Energy consumption:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 48, + 643, + 289, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 643, + 289, + 739 + ], + "spans": [ + { + "bbox": [ + 48, + 643, + 289, + 739 + ], + "type": "text", + "content": "Figure 7 depicts the average energy consumption of ML-based IDS models under different attacks in an SDN environment. The results indicate that traditional ML models consume more energy, especially during port scans, e.g., DT, KNN, and RF. In contrast, the EIDM model consistently shows lower energy consumption across all attack types, highlighting its efficiency. The LSTM and CNN models display moderate energy usage, including their hybrid" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 415, + 545, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 415, + 545, + 463 + ], + "spans": [ + { + "bbox": [ + 302, + 415, + 545, + 463 + ], + "type": "text", + "content": "version. Compared to non-SDN environments, the increased energy consumption in the SDN setup is attributed to the SDN controller's active role in traffic management and threat response, which demands more energy resources." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 303, + 463, + 392, + 475 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 463, + 392, + 475 + ], + "spans": [ + { + "bbox": [ + 303, + 463, + 392, + 475 + ], + "type": "text", + "content": "Statistical Findings:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 475, + 545, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 475, + 545, + 642 + ], + "spans": [ + { + "bbox": [ + 302, + 475, + 545, + 642 + ], + "type": "text", + "content": "We applied ANOVA on energy consumption data across ML-based IDSs in SDN under DDoS. 
The results, presented in Table 9, reveal significant differences in energy consumption among diverse ML-based IDS under DDoS, underscored by an impressive F-statistic of 18.27 and a p-value of " + }, + { + "bbox": [ + 302, + 475, + 545, + 642 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 302, + 475, + 545, + 642 + ], + "type": "text", + "content": ". This F-statistic highlights the variance in energy consumption across a group of IDSs compared to the variance in, indicating a significant impact of IDS selection on energy consumption. Moreover, a consistently low p-value (" + }, + { + "bbox": [ + 302, + 475, + 545, + 642 + ], + "type": "inline_equation", + "content": "< 0.05" + }, + { + "bbox": [ + 302, + 475, + 545, + 642 + ], + "type": "text", + "content": ") was observed across other cyber threats, including brute force, DoS, and port scan, so we do not report the results here. This highlights marked differences in CPU usage among diverse ML-based IDS when subjected to examined cyber threats." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 49, + 755, + 248, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 755, + 248, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 755, + 248, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "text", + "content": "Page 14 of 21" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 55, + 451, + 260 + ], + "blocks": [ + { + "bbox": [ + 170, + 34, + 424, + 45 + ], + "lines": [ + { + "bbox": [ + 170, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 170, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 126, + 55, + 451, + 260 + ], + "lines": [ + { + "bbox": [ + 126, + 55, + 451, + 260 + ], + "spans": [ + { + "bbox": [ + 126, + 55, + 451, + 260 + ], + "type": "image", + "image_path": "76798f9d0ff7469f92dc1968687a668bbe01b8942db9c98e301c11f254bb5a84.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 131, + 279, + 461, + 291 + ], + "lines": [ + { + "bbox": [ + 131, + 279, + 461, + 291 + ], + "spans": [ + { + "bbox": [ + 131, + 279, + 461, + 291 + ], + "type": "text", + "content": "Figure 7: The Average Energy consumption of ML-based IDS under cyber threats." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 85, + 337, + 509, + 394 + ], + "blocks": [ + { + "bbox": [ + 48, + 311, + 354, + 333 + ], + "lines": [ + { + "bbox": [ + 48, + 311, + 354, + 333 + ], + "spans": [ + { + "bbox": [ + 48, + 311, + 354, + 333 + ], + "type": "text", + "content": "Table 9 ANOVA results: Energy consumption for ML-based IDS in SDN under DDoS." 
+ } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 85, + 337, + 509, + 394 + ], + "lines": [ + { + "bbox": [ + 85, + 337, + 509, + 394 + ], + "spans": [ + { + "bbox": [ + 85, + 337, + 509, + 394 + ], + "type": "table", + "html": "
<table>
<tr><td>Source</td><td>Degrees of Freedom</td><td>Sum of Squares</td><td>Mean Square</td><td>F Statistic</td><td>P-value</td></tr>
<tr><td>Between groups</td><td>6</td><td>1263.26</td><td>210.54</td><td>18.27</td><td>< 0.05</td></tr>
<tr><td>Within groups</td><td>91</td><td>1048.21</td><td>11.51</td><td></td><td></td></tr>
<tr><td>Total</td><td>97</td><td>2311.48</td><td>23.82</td><td></td><td></td></tr>
</table>
", + "image_path": "663548692e8e7e2de3dfdeda62c7ea5cdf8682492fac6d42168c814f8765d763.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 64, + 413, + 98, + 426 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 413, + 98, + 426 + ], + "spans": [ + { + "bbox": [ + 64, + 413, + 98, + 426 + ], + "type": "text", + "content": "Finding" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 63, + 436, + 274, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 63, + 436, + 274, + 674 + ], + "spans": [ + { + "bbox": [ + 63, + 436, + 274, + 674 + ], + "type": "text", + "content": "The findings accentuate the distinct energy efficiency profiles of ML-based IDSs when exposed to various cyber threat scenarios. During brute force and the port scan, traditional ML-based IDS such as DT, KNN, and RF are observed to have higher energy consumption. This indicates that these models are not energy-efficient under the examined conditions due to their complex computational frameworks. On the other hand, DL-based IDS and the EIDM show markedly superior energy efficiency. The reduced energy footprint of DL-based IDS is especially advantageous in the context of the SDN-enabled at the edge, where low energy consumption is crucial due to device constraints and the need for long-term, autonomous operation. The reduction in energy consumption observed in DL-based IDS when integrated with SDN highlights the benefits of centralized network control and optimized workload distribution, making them a more sustainable choice for IoT security." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 303, + 412, + 539, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 412, + 539, + 449 + ], + "spans": [ + { + "bbox": [ + 303, + 412, + 539, + 449 + ], + "type": "text", + "content": "5.3. Analyzing the Impact of SDN on CPU Usage, Load, and Energy Efficiency in ML-Based IDS" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 451, + 546, + 607 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 451, + 546, + 607 + ], + "spans": [ + { + "bbox": [ + 302, + 451, + 546, + 607 + ], + "type": "text", + "content": "Figure 8 demonstrates that integrating SDN with ML-based IDS in the edge gateway significantly improves resource efficiency, reducing energy consumption, CPU usage, and CPU load. The most substantial improvement is in CPU usage, where DL-based IDS, e.g., LSTM and CNN, outperform traditional ML models by efficiently handling complex computations through parallel processing. Additionally, SDN integration reduces CPU load by balancing workloads, essential for real-time threat detection in edge gateway. The observed reduction in energy consumption further highlights the approach's suitability for battery-powered edge gateway, confirming its scalability and practicality for real-world applications." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 303, + 624, + 524, + 651 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 624, + 524, + 651 + ], + "spans": [ + { + "bbox": [ + 303, + 624, + 524, + 651 + ], + "type": "text", + "content": "6. ML-Based IDS vs. 
Signature-Based IDS (Snort)" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 655, + 545, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 655, + 545, + 739 + ], + "spans": [ + { + "bbox": [ + 302, + 655, + 545, + 739 + ], + "type": "text", + "content": "This section compares our ML-based IDS models and the signature-based Snort IDS to evaluate the performance improvements achieved by leveraging ML-based IDS over traditional detection systems. This comparison is essential to highlight the advantages of ML-based approaches regarding resource efficiency, scalability, and adaptability, especially in edge gateway." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 486, + 755, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 486, + 755, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 486, + 755, + 543, + 766 + ], + "type": "text", + "content": "Page 15 of 21" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 141, + 54, + 452, + 229 + ], + "blocks": [ + { + "bbox": [ + 170, + 34, + 424, + 45 + ], + "lines": [ + { + "bbox": [ + 170, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 170, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 141, + 54, + 452, + 229 + ], + "lines": [ + { + "bbox": [ + 141, + 54, + 452, + 229 + ], + "spans": [ + { + "bbox": [ + 141, + 54, + 452, + 229 + ], + "type": "image", + "image_path": "3b4e80fa10eb1e2f9e4d8be48c124d93e93944e9df202e0ad45add7539d26c8e.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 49, + 235, + 545, + 258 + ], + "lines": [ + { + "bbox": [ + 49, + 235, + 545, + 258 + ], + "spans": [ + { + "bbox": [ + 49, + 235, + 545, + 258 + ], + "type": "text", + "content": "Figure 8: Reduction in energy consumption, CPU usage, and CPU load for ML-based IDS models with SDN integration in edge gateway." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 284, + 288, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 284, + 288, + 319 + ], + "spans": [ + { + "bbox": [ + 49, + 284, + 288, + 319 + ], + "type": "text", + "content": "The results presented in Table 10 provide a comparative analysis of our ML-based IDS models against the signature-based Snort IDS discussed in other research." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 321, + 289, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 321, + 289, + 642 + ], + "spans": [ + { + "bbox": [ + 49, + 321, + 289, + 642 + ], + "type": "text", + "content": "Regarding CPU usage, Snort IDS shows high utilization under heavy traffic due to its reliance on predefined rules and signature matching. In contrast, the ML-based IDS models demonstrate better CPU efficiency. 
While traditional ML models, e.g., DT and KNN, have higher CPU usage because of iterative computations, DL-based IDS, e.g., LSTM, CNN, and a hybrid of LSTM and CNN, EIDM exhibits lower CPU usage. This is primarily due to DL-based IDS's ability to process data in batches and leverage parallel processing for real-time threat detection. For energy consumption, Table 10 shows that Snort IDS consumes more energy, especially in IoT networks requiring multiple containers. However, our ML-based IDS models, especially DL architectures, e.g., LSTM and EIDM, demonstrate superior energy efficiency. These models optimize resource usage and process data efficiently, making them suitable for resource-constrained edge gateway and highlighting their scalability advantages. Finally, in terms of CPU load, Table 10 indicates that earlier versions of Snort IDS suffer from high CPU load on a single core because of their single-threaded architecture. Although newer versions introduce multi-threading, they still encounter processing bottlenecks under heavy traffic. Conversely, the ML-based IDS models distribute the CPU load more effectively across multiple cores. DL-based IDS, especially LSTM and hybrid architectures, achieve the lowest CPU load levels due to their parallel execution capabilities and efficient handling of sequential data." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 50, + 660, + 120, + 672 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 660, + 120, + 672 + ], + "spans": [ + { + "bbox": [ + 50, + 660, + 120, + 672 + ], + "type": "text", + "content": "7. Discussion" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 50, + 678, + 288, + 736 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 678, + 288, + 736 + ], + "spans": [ + { + "bbox": [ + 50, + 678, + 288, + 736 + ], + "type": "text", + "content": "Our investigations explored the performance metrics of ML-based IDS with various models, especially in IoT-edge devices with and without SDN integration. Our study was primarily evaluating the impact of these models on CPU load, CPU usage, and energy consumption amidst diverse" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 305, + 283, + 545, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 283, + 545, + 737 + ], + "spans": [ + { + "bbox": [ + 305, + 283, + 545, + 737 + ], + "type": "text", + "content": "cyberattack scenarios. The empirical findings revealed significant disparities in resource utilization across different ML-based IDS, shedding light on crucial aspects of their deployment in IoT devices integrated with SDN. The KNN, DT, and RF significantly exhibited higher CPU load, CPU usage, and energy consumption, especially under specific types of cyberattacks. While these models are adept at identifying threats, their resource-intensive nature could pose challenges in the IoT context, where computational resources are often limited. This could lead to diminished performance or instability in environments with constrained resources. Specifically, KNN's higher variance in CPU load and energy consumption, as observed in Tables 4 and 5, stems from its lazy learning approach. Unlike other models, KNN does not build a generalized model during training but instead stores the entire dataset and computes distances at query time. This results in increased processing demands, leading to fluctuations in resource utilization. 
Such behavior makes KNN less suitable for real-time IDS applications in resource-constrained IoT networks[72] [73]. While CPU load significantly impacts energy consumption, it is not the sole factor. Memory operations, network activity, peripheral devices, and thermal management also contribute to power usage in IoT devices. High data transmission rates and active sensors can increase energy demands, while sustained CPU load may trigger additional energy consumption for cooling mechanisms. Although a strong correlation between CPU load and energy consumption is expected, these factors introduce variations across IDS models. Optimizing IDS efficiency can help balance security and resource constraints in IoT networks. Conversely, the CNN and LSTM models demonstrated greater efficiency in resource utilization. While their architectures are sophisticated and adept at processing complex data structures, they appear to optimize the computational load during inference when employed in IDS. This makes them more suitable for scenarios where resource conservation is critical. However, the complexity of these models introduces its own set of challenges, especially" + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 487, + 756, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 487, + 756, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 487, + 756, + 543, + 766 + ], + "type": "text", + "content": "Page 16 of 21" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 64, + 83, + 530, + 363 + ], + "blocks": [ + { + "bbox": [ + 48, + 58, + 343, + 80 + ], + "lines": [ + { + "bbox": [ + 48, + 58, + 343, + 80 + ], + "spans": [ + { + "bbox": [ + 48, + 58, + 343, + 80 + ], + "type": "text", + "content": "Table 17 Comparative Resource Utilization of ML-Based IDS and Snort IDS Based" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 64, + 83, + 530, + 363 + ], + "lines": [ + { + "bbox": [ + 64, + 83, + 530, + 363 + ], + "spans": [ + { + "bbox": [ + 64, + 83, + 530, + 363 + ], + "type": "table", + "html": "
<table>
<tr><td>Metric</td><td>Snort IDS</td><td>ML-Based IDS (Our Findings)</td></tr>
<tr><td>CPU Usage</td><td>- High Traffic Conditions: CPU usage can reach its maximum during initialization with many active rules [67].<br>- Multi-Core Systems: Snort 3.0 utilizes a significant portion of CPU resources on a multi-core processor [68] [69].</td><td>- Traditional ML Models (DT, KNN, RF): Tend to exhibit higher CPU usage during real-time cyber threats, especially those requiring intensive computations.<br>- DL-Based Models (CNN, LSTM, Hybrid of LSTM and CNN, and EIDM): Show lower CPU usage compared to traditional ML models, with LSTM models demonstrating the most efficient utilization due to sequential data processing and parallelization.</td></tr>
<tr><td>Energy Consumption</td><td>- IoT Deployment: Deployment of Snort on IoT gateways results in considerable energy consumption [70].</td><td>- Traditional ML-based IDS: Generally consume more energy during inference cycles due to repetitive computations.<br>- DL-Based Models: Exhibit better energy efficiency, especially models that combine convolutional and sequential layers, benefiting from optimized processing structures.</td></tr>
<tr><td>CPU Load</td><td>- Single-Core Utilization: Older Snort versions (pre-3.0) lead to high load on a single core under heavy traffic [71].<br>- Multi-Core Systems: Updated versions distribute the load but still face processing bottlenecks under extensive traffic [71].</td><td>- Traditional ML-based IDS: Often show higher CPU load during complex attack scenarios.<br>- DL-Based Models: Maintain a lower CPU load, benefiting from parallel processing capabilities, with hybrid models showing the most balanced load distribution.</td></tr>
</table>
", + "image_path": "de6163c30a2befa354ff42e749225c46579ebb595981253974955912f27f9118.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 381, + 289, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 381, + 289, + 404 + ], + "spans": [ + { + "bbox": [ + 48, + 381, + 289, + 404 + ], + "type": "text", + "content": "in terms of training and ongoing maintenance in the dynamic landscape of IoT devices integrated with SDN." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 406, + 289, + 584 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 406, + 289, + 584 + ], + "spans": [ + { + "bbox": [ + 48, + 406, + 289, + 584 + ], + "type": "text", + "content": "The balance between detection efficiency and resource consumption is especially critical at edge gateway, where devices often have limited processing power and energy reserves. This balance is closely tied to several United Nations Sustainable Development Goals (SDGs), especially SDG 9 (Industry, Innovation, and Infrastructure), SDG 11 (Sustainable Cities and Communities), and SDG 13 (Climate Action). Optimizing IDS deployment in smart cities strengthens cybersecurity infrastructure, directly supporting SDG 9 while fostering resilient, sustainable urban environments in line with SDG 11. Furthermore, by prioritizing energy-efficient IDS solutions, this research contributes to SDG 13, promoting responsible resource consumption and mitigating the environmental impact of growing IoT networks [74]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 584, + 289, + 739 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 584, + 289, + 739 + ], + "spans": [ + { + "bbox": [ + 48, + 584, + 289, + 739 + ], + "type": "text", + "content": "To aid IoT developers in selecting appropriate IDS solutions, we provide detailed guidelines in Table 11 and Table 12, outlining the performance trade-offs of seven different ML-based IDS models for IoT devices examined in this paper, both with and without SDN integration. These insights enable developers to make informed decisions, ensuring the optimal balance between security and resource efficiency during application development. We use graphical indicators (smiley faces) instead of numerical values to provide an intuitive, high-level comparison of IDS performance. This visual approach simplifies decision-making for IoT developers, aligning with similar methodologies used in prior work [75]. Moreover, all corresponding numerical values" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 302, + 381, + 544, + 404 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 381, + 544, + 404 + ], + "spans": [ + { + "bbox": [ + 302, + 381, + 544, + 404 + ], + "type": "text", + "content": "related to CPU usage, CPU load, and energy consumption are presented in the Figures and Tables in Section 5." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 406, + 545, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 406, + 545, + 632 + ], + "spans": [ + { + "bbox": [ + 302, + 406, + 545, + 632 + ], + "type": "text", + "content": "On the other hand, to the best of our knowledge, only Tekin et al. [12] have explored a similar direction in evaluating the performance of ML-based IDS in IoT systems. 
However, our study takes a fundamentally different approach, especially in how computational resources are classified and utilized, which plays a critical role in the effectiveness and scalability of IoT systems. While Tekin et al. focus on energy consumption and inference times using Raspberry Pi as an IoT device, our study emphasizes the advantages of processing data at the edge, especially regarding energy efficiency, CPU load, and usage. We show how models such as DT and RF benefit from edge processing, reducing latency and improving responsiveness, especially when combined with SDN, which optimizes network traffic and resource allocation. Our findings underscore the importance of balancing computational tasks across the network using SDN to maintain performance, unlike Tekin et al. [12], who do not explore the impact of edge computing or SDN integration." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 649, + 420, + 662 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 649, + 420, + 662 + ], + "spans": [ + { + "bbox": [ + 303, + 649, + 420, + 662 + ], + "type": "text", + "content": "8. Threat and validity" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 666, + 545, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 666, + 545, + 716 + ], + "spans": [ + { + "bbox": [ + 302, + 666, + 545, + 716 + ], + "type": "text", + "content": "Empirical research inevitably encounters issues related to the validity of findings. In light of this, the present section seeks to identify and discuss possible threats to our research's validity, per the recommendations of Wohlin et al. [76]." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 754, + 248, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 486, + 754, + 544, + 767 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 486, + 754, + 544, + 767 + ], + "spans": [ + { + "bbox": [ + 486, + 754, + 544, + 767 + ], + "type": "text", + "content": "Page 17 of 21" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 83, + 486, + 134 + ], + "blocks": [ + { + "bbox": [ + 50, + 57, + 294, + 80 + ], + "lines": [ + { + "bbox": [ + 50, + 57, + 294, + 80 + ], + "spans": [ + { + "bbox": [ + 50, + 57, + 294, + 80 + ], + "type": "text", + "content": "Table 11 Guideline for selecting seven ML-based IDS in edge gateway." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 83, + 486, + 134 + ], + "lines": [ + { + "bbox": [ + 107, + 83, + 486, + 134 + ], + "spans": [ + { + "bbox": [ + 107, + 83, + 486, + 134 + ], + "type": "table", + "html": "
MetricDTKNNRFCNNLSTMCNNLSTM+CNNEIDM
CPU load
CPU usage
Energy consumption
", + "image_path": "57cb6a1aa51f0f83e9611c049e5334b014beab92ec3cd0b15411af446b8dc0ef.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 107, + 173, + 486, + 223 + ], + "blocks": [ + { + "bbox": [ + 50, + 149, + 315, + 170 + ], + "lines": [ + { + "bbox": [ + 50, + 149, + 315, + 170 + ], + "spans": [ + { + "bbox": [ + 50, + 149, + 315, + 170 + ], + "type": "text", + "content": "Table 12 Guideline for selecting seven ML-based IDS in SDN-edge gateway." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 173, + 486, + 223 + ], + "lines": [ + { + "bbox": [ + 107, + 173, + 486, + 223 + ], + "spans": [ + { + "bbox": [ + 107, + 173, + 486, + 223 + ], + "type": "table", + "html": "
MetricDTKNNRFCNNLSTMCNNLSTM+CNNEIDM
CPU load
CPU usage
Energy consumption
", + "image_path": "ebfdbe0d011113feaa528a9d5edb54922ef1df36ccfb3ae94161ccec55847281.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 64, + 225, + 516, + 237 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 64, + 225, + 516, + 237 + ], + "spans": [ + { + "bbox": [ + 64, + 225, + 516, + 237 + ], + "type": "text", + "content": "The energy consumption and CPU usage in all ML-based IDS lowered during the brute force attack and port scan." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 256, + 151, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 256, + 151, + 268 + ], + "spans": [ + { + "bbox": [ + 49, + 256, + 151, + 268 + ], + "type": "text", + "content": "8.1. Internal Threats" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 269, + 289, + 519 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 269, + 289, + 519 + ], + "spans": [ + { + "bbox": [ + 49, + 269, + 289, + 519 + ], + "type": "text", + "content": "During our empirical study on ML-based IDS in the context of IoT devices with IoT devices integrated with SDN, we recognized the existence of internal obstacles that impact the credibility of our findings. The precision of our performance measures is of utmost importance, namely the measurement of CPU load, CPU usage, and energy consumption in these intricate network settings. The complex characteristics of IoT devices and the adaptable structure of SDN provide significant difficulties in guaranteeing accurate and dependable performance evaluations. To address these concerns, we performed fifteen experiments on our testbeds. To improve the trustworthiness of our results in the context of SDN and IoT, we utilized average values to reduce the impact of network or hardware differences and ambient factors. In addition, the cyber threat simulations were conducted using highly practiced cyber security testing mechanisms in academic research and industries in IoT-edge devices integrated with SDN. This work aims to tackle internal risks associated with the setup and precision of ML-based IDS, improving their usefulness and significance in these fast-advancing technical fields." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 529, + 157, + 541 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 529, + 157, + 541 + ], + "spans": [ + { + "bbox": [ + 49, + 529, + 157, + 541 + ], + "type": "text", + "content": "8.2. External Threats:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 542, + 289, + 733 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 542, + 289, + 733 + ], + "spans": [ + { + "bbox": [ + 49, + 542, + 289, + 733 + ], + "type": "text", + "content": "The landscape of network security, especially in IoT-edge devices and IoT-edge devices integrated with SDN realms, is increasingly challenged by external threats. These range from sophisticated cyberattacks such as DoS, DDoS, and brute force attacks to more subtle, yet equally harmful, reconnaissance methods such as a port scan. These threats highlight the urgent need for robust and adaptable IDS solutions. Integrating ML into IDS presents promising advancements in threat detection and mitigation. However, this integration faces challenges due to the complexity of IoT-edge devices, which are marked by numerous interconnected devices, and the dynamic nature of SDN architectures. 
IDS solutions must be precise in threat detection while also being resource-efficient. Our research evaluates ML-based IDS based on CPU usage, CPU load, and energy consumption, especially under real-time cyber threats. These metrics are" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 256, + 545, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 256, + 545, + 398 + ], + "spans": [ + { + "bbox": [ + 305, + 256, + 545, + 398 + ], + "type": "text", + "content": "vital to ensure that ML-based IDS are effective in protecting networks against external threats and sustainable in their operation. They help maintain a crucial balance between security and performance in the complex ecosystems of IoT devices and IoT devices integrated with SDN. Additionally, to ensure the transparency and reproducibility of our study, we have provided detailed information about the experimental setup and made our testbed and results publicly available for further research [77]. By adopting these measures, we have attempted to provide robust validation and increase the inability to reject our findings among practitioners and researchers." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 415, + 378, + 427 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 415, + 378, + 427 + ], + "spans": [ + { + "bbox": [ + 305, + 415, + 378, + 427 + ], + "type": "text", + "content": "9. Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 432, + 545, + 742 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 432, + 545, + 742 + ], + "spans": [ + { + "bbox": [ + 305, + 432, + 545, + 742 + ], + "type": "text", + "content": "This paper presents a comparative analysis of the ML-based IDS in IoT-edge devices and IoT-edge devices integrated with SDN under different cyberattack scenarios, resulting in comprehension. In IoT systems, conventional ML models (e.g., KNN and DT) often experience increased CPU load and CPU usage, especially when subjected to DoS and DDoS cyber threats. This suggests that these models have limits in resource-limited situations. In contrast, DL-based IDS (e.g., CNN and LSTM) exhibit reduced CPU usage, indicating improved efficiency and compatibility with IoT security. A consistent energy consumption pattern was identified across attack types in both scenarios, encompassing advanced neural networks and conventional methods. The consistent energy efficiency of these models, independent of their computing complexity, highlights their efficacy and long-term viability for use in different network environments. The findings emphasize the significance of choosing ML-based IDS according to their computational efficiency and energy consumption to achieve optimal performance in networks with limited resources. It is imperative to thoroughly evaluate the scalability and robustness of ML-based IDS in future research, especially in more significant and more complex network environments. This assessment will explain their ability to adjust to changing cyber threats. 
Furthermore, it is crucial to evaluate the influence of new technologies, e.g., 5G and edge computing, on the efficacy" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 50, + 756, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 487, + 756, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 487, + 756, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 487, + 756, + 543, + 766 + ], + "type": "text", + "content": "Page 18 of 21" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 48, + 54, + 289, + 78 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 54, + 289, + 78 + ], + "spans": [ + { + "bbox": [ + 48, + 54, + 289, + 78 + ], + "type": "text", + "content": "and suitability of ML-based IDS in advanced network infrastructures." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 48, + 79, + 289, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 79, + 289, + 185 + ], + "spans": [ + { + "bbox": [ + 48, + 79, + 289, + 185 + ], + "type": "text", + "content": "Future research directions should pivot towards optimizing ML-based IDS for enhanced scalability, real-time processing, and energy consumption. The overarching challenge is to develop effective threat detection models that minimally impact system resources. Furthermore, integrating these models into existing IoT devices and IoT devices integrated with SDN infrastructures presents additional challenges, including ensuring compatibility, scalability, and ease of maintenance." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 48, + 202, + 164, + 214 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 202, + 164, + 214 + ], + "spans": [ + { + "bbox": [ + 48, + 202, + 164, + 214 + ], + "type": "text", + "content": "A. Conflict of interest" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 48, + 219, + 289, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 219, + 289, + 255 + ], + "spans": [ + { + "bbox": [ + 48, + 219, + 289, + 255 + ], + "type": "text", + "content": "The authors declare that they have no known conflict of interest or personal relationships that could have appeared to influence the work reported in this paper." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 48, + 272, + 164, + 286 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 272, + 164, + 286 + ], + "spans": [ + { + "bbox": [ + 48, + 272, + 164, + 286 + ], + "type": "text", + "content": "B. Acknowledgement" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 48, + 290, + 289, + 313 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 48, + 290, + 289, + 313 + ], + "spans": [ + { + "bbox": [ + 48, + 290, + 289, + 313 + ], + "type": "text", + "content": "The authors thank Dr. Karim A. 
Emara et al. for collaborating to share the EIDM-IDS source code." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 331, + 108, + 343 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 331, + 108, + 343 + ], + "spans": [ + { + "bbox": [ + 49, + 331, + 108, + 343 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 348, + 289, + 726 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 54, + 348, + 289, + 377 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 348, + 289, + 377 + ], + "spans": [ + { + "bbox": [ + 54, + 348, + 289, + 377 + ], + "type": "text", + "content": "[1] D. G. Chowdhry, R. Verma, M. Mathur, The Evolution of Business in the Cyber Age: Digital Transformation, Threats, and Security, CRC Press, 2020." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 54, + 379, + 287, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 379, + 287, + 407 + ], + "spans": [ + { + "bbox": [ + 54, + 379, + 287, + 407 + ], + "type": "text", + "content": "[2] B. Kaur, S. Dadkhah, F. Shoeleh, al., Internet of things (iot) security dataset evolution: Challenges and future directions, Internet of Things (2023) 100780." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 54, + 409, + 288, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 409, + 288, + 436 + ], + "spans": [ + { + "bbox": [ + 54, + 409, + 288, + 436 + ], + "type": "text", + "content": "[3] S. Hadzovic, S. Mrdovic, M. Radonjic, A path towards an internet of things and artificial intelligence regulatory framework, IEEE Communications Magazine (2023)." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 54, + 438, + 288, + 467 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 438, + 288, + 467 + ], + "spans": [ + { + "bbox": [ + 54, + 438, + 288, + 467 + ], + "type": "text", + "content": "[4] K. L. M. Ang, J. K. P. Seng, E. Ngharamike, Towards crowdsourcing internet of things (crowd-iot): Architectures, security, and applications, Future Internet 14 (2) (2022) 49." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 54, + 468, + 288, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 468, + 288, + 486 + ], + "spans": [ + { + "bbox": [ + 54, + 468, + 288, + 486 + ], + "type": "text", + "content": "[5] M. Ahmid, O. Kazar, A comprehensive review of the internet of things security, Journal of Applied Security Research 18 (3) (2023) 289-305." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 54, + 488, + 289, + 525 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 488, + 289, + 525 + ], + "spans": [ + { + "bbox": [ + 54, + 488, + 289, + 525 + ], + "type": "text", + "content": "[6] P. Mall, R. Amin, A. K. Das, M. T. Leung, K.-K. R. Choo, Puf-based authentication and key agreement protocols for IoT, wsns, and smart grids: a comprehensive survey, IEEE Internet of Things Journal 9 (11) (2022) 8205-8228." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 54, + 527, + 289, + 566 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 527, + 289, + 566 + ], + "spans": [ + { + "bbox": [ + 54, + 527, + 289, + 566 + ], + "type": "text", + "content": "[7] A. Lakhlan, M. A. Mohammed, K. H. Abdulkareem, M. M. Jaber, J. Nedoma, R. Martinek, P. 
Zmij, Delay optimal schemes for internet of things applications in heterogeneous edge cloud computing networks, Sensors 22 (16) (2022) 5937." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 54, + 568, + 289, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 568, + 289, + 595 + ], + "spans": [ + { + "bbox": [ + 54, + 568, + 289, + 595 + ], + "type": "text", + "content": "[8] P. Malhotra, Y. Singh, P. Anand, Bangotra, al, Internet of things: Evolution, concerns and security challenges, Sensors 21 (5) (2021) 1809." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 54, + 597, + 289, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 597, + 289, + 626 + ], + "spans": [ + { + "bbox": [ + 54, + 597, + 289, + 626 + ], + "type": "text", + "content": "[9] A. Djenna, S. Harous, D. E. Saidouni, Internet of things meet the internet of threats: New concern cyber security issues of critical cyber infrastructure, Applied Sciences 11 (10) (2021) 4580." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 49, + 628, + 289, + 656 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 628, + 289, + 656 + ], + "spans": [ + { + "bbox": [ + 49, + 628, + 289, + 656 + ], + "type": "text", + "content": "[10] M. Almiani, A. AbuGhazleh, A. Al-Rahayfeh, S. Atiewi, A. Razaque, Deep recurrent neural network for IoT intrusion detection system, Simulation Modelling Practice and Theory 101 (2020) 102031." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 49, + 657, + 289, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 657, + 289, + 696 + ], + "spans": [ + { + "bbox": [ + 49, + 657, + 289, + 696 + ], + "type": "text", + "content": "[11] T. Rajmohan, P. H. Nguyen, N. Ferry, Research landscape of patterns and architectures for IoT security: a systematic review, in: 2020 46th Euromicro conference on software engineering and advanced applications (SEAA), IEEE, 2020, pp. 463-470." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 49, + 697, + 289, + 726 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 697, + 289, + 726 + ], + "spans": [ + { + "bbox": [ + 49, + 697, + 289, + 726 + ], + "type": "text", + "content": "[12] N. Tekin, A. Acar, A. Aris, A. S. Uluagac, V. C. Gungor, Energy consumption of on-device machine learning models for IoT intrusion detection, Internet of Things 21 (2023) 100670." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 305, + 56, + 544, + 721 + ], + "type": "list", + "angle": 0, + "index": 40, + "blocks": [ + { + "bbox": [ + 305, + 56, + 544, + 86 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 56, + 544, + 86 + ], + "spans": [ + { + "bbox": [ + 305, + 56, + 544, + 86 + ], + "type": "text", + "content": "[13] A. Hakiri, A. Gokhale, P. Berthou, D. C. Schmidt, T. Gayraud, Software-defined networking: Challenges and research opportunities for future internet, Computer Networks 75 (2014) 453-471." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 305, + 88, + 544, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 88, + 544, + 126 + ], + "spans": [ + { + "bbox": [ + 305, + 88, + 544, + 126 + ], + "type": "text", + "content": "[14] K. H. K. Reddy, A. K. Luhach, V. V. Kumar, S. Pratihar, D. Kumar, D. S. 
Roy, Towards energy efficient smart city services: A software defined resource management scheme for data centers, Sustainable Computing: Informatics and Systems 35 (2022) 100776." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 127, + 544, + 155 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 127, + 544, + 155 + ], + "spans": [ + { + "bbox": [ + 305, + 127, + 544, + 155 + ], + "type": "text", + "content": "[15] A. Montazerolghaem, Software-defined internet of multimedia things: Energy-efficient and load-balanced resource management, IEEE Internet of Things Journal 9 (3) (2021) 2432-2442." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 157, + 544, + 194 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 157, + 544, + 194 + ], + "spans": [ + { + "bbox": [ + 305, + 157, + 544, + 194 + ], + "type": "text", + "content": "[16] J. Liu, H. Shen, H. S. Narman, W. Chung, Z. Lin, A survey of mobile crowdsensing techniques: A critical component for the internet of things, ACM Transactions on Cyber-Physical Systems 2 (3) (2018) 1-26." + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 305, + 196, + 544, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 196, + 544, + 225 + ], + "spans": [ + { + "bbox": [ + 305, + 196, + 544, + 225 + ], + "type": "text", + "content": "[17] B. B. Gupta, M. Quamara, An overview of internet of things (iot): Architectural aspects, challenges, and protocols, Concurrency and Computation: Practice and Experience 32 (21) (2020) e4946." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 305, + 226, + 543, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 226, + 543, + 255 + ], + "spans": [ + { + "bbox": [ + 305, + 226, + 543, + 255 + ], + "type": "text", + "content": "[18] A. A. Alsulami, Q. A. Al-Haija, A. Tayeb, Anomaly-based intrusion detection system for IoT networks with improved data engineering (2022)." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 305, + 256, + 543, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 256, + 543, + 294 + ], + "spans": [ + { + "bbox": [ + 305, + 256, + 543, + 294 + ], + "type": "text", + "content": "[19] I. Mukherjee, N. K. Sahu, S. K. Sahana, Simulation and modeling for anomaly detection in IoT network using machine learning, International Journal of Wireless Information Networks 30 (2) (2023) 173-189." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 305, + 296, + 543, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 296, + 543, + 325 + ], + "spans": [ + { + "bbox": [ + 305, + 296, + 543, + 325 + ], + "type": "text", + "content": "[20] O. Elnakib, E. Shaaban, M. Mahmoud, K. Emara, Eidm: deep learning model for IoT intrusion detection systems, The Journal of Supercomputing (2023) 1-21." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 305, + 326, + 543, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 326, + 543, + 363 + ], + "spans": [ + { + "bbox": [ + 305, + 326, + 543, + 363 + ], + "type": "text", + "content": "[21] M. Douiba, S. Benkirane, A. Guezzzaz, M. Azrour, An improved anomaly detection model for IoT security using decision tree and gradient boosting, The Journal of Supercomputing 79 (3) (2023) 3392-3411." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 305, + 365, + 543, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 365, + 543, + 394 + ], + "spans": [ + { + "bbox": [ + 305, + 365, + 543, + 394 + ], + "type": "text", + "content": "[22] S. M. Kasongo, Y. Sun, A deep learning method with wrapper-based feature extraction for wireless intrusion detection system, Computers & Security 92 (2020) 101752." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 305, + 395, + 543, + 424 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 395, + 543, + 424 + ], + "spans": [ + { + "bbox": [ + 305, + 395, + 543, + 424 + ], + "type": "text", + "content": "[23] A. Verma, V. Ranga, Machine learning-based intrusion detection systems for IoT applications, Wireless Personal Communications 111 (2020) 2287-2310." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 305, + 425, + 543, + 453 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 425, + 543, + 453 + ], + "spans": [ + { + "bbox": [ + 305, + 425, + 543, + 453 + ], + "type": "text", + "content": "[24] Y. Otoum, D. Liu, A. Nayak, Dl-ids: a deep learning-based intrusion detection framework for securing IoT, Transactions on Emerging Telecommunications Technologies 33 (3) (2022) e3803." + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 305, + 455, + 543, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 455, + 543, + 483 + ], + "spans": [ + { + "bbox": [ + 305, + 455, + 543, + 483 + ], + "type": "text", + "content": "[25] T. Gaber, A. El-Ghamry, A. E. Hassanien, Injection attack detection using machine learning for smart IoT applications, Physical Communication 52 (2022) 101685." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 305, + 485, + 543, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 485, + 543, + 523 + ], + "spans": [ + { + "bbox": [ + 305, + 485, + 543, + 523 + ], + "type": "text", + "content": "[26] U. Sachdeva, P. R. Vamsi, Analysis of deep learning models for anomaly detection in time series IoT sensor data, in: Proceedings of the 2022 Fourteenth International Conference on Contemporary Computing, 2022, pp. 54-62." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 305, + 524, + 543, + 562 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 524, + 543, + 562 + ], + "spans": [ + { + "bbox": [ + 305, + 524, + 543, + 562 + ], + "type": "text", + "content": "[27] K. Nimmy, M. Dilraj, S. Sankaran, K. Achuthan, Leveraging power consumption for anomaly detection on IoT devices in smart homes, Journal of Ambient Intelligence and Humanized Computing (2022) 1-12." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 305, + 564, + 543, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 564, + 543, + 593 + ], + "spans": [ + { + "bbox": [ + 305, + 564, + 543, + 593 + ], + "type": "text", + "content": "[28] R. Chaganti, W. Suliman, V. Ravi, A. Dua, Deep learning approach for sdn-enabled intrusion detection system in IoT networks, Information 14 (1) (2023) 41." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 305, + 594, + 543, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 594, + 543, + 632 + ], + "spans": [ + { + "bbox": [ + 305, + 594, + 543, + 632 + ], + "type": "text", + "content": "[29] M. M. Isa, L. 
Mhamdi, Hybrid deep autoencoder with random forest in native sdn intrusion detection environment, in: ICC 2022-IEEE International Conference on Communications, IEEE, 2022, pp. 1698-1703." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 305, + 634, + 543, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 634, + 543, + 682 + ], + "spans": [ + { + "bbox": [ + 305, + 634, + 543, + 682 + ], + "type": "text", + "content": "[30] P. T. Duy, H. Do Hoang, N. H. Khoa, V.-H. Pham, et al., Fool your enemies: Enable cyber deception and moving target defense for intrusion detection in sdn, in: 2022 21st International Symposium on Communications and Information Technologies (ISCIT), IEEE, 2022, pp. 27-32." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 305, + 684, + 543, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 684, + 543, + 721 + ], + "spans": [ + { + "bbox": [ + 305, + 684, + 543, + 721 + ], + "type": "text", + "content": "[31] M. A. Bouke, A. Abdullah, S. H. ALshatebi, M. T. Abdullah, E2ids: An enhanced intelligent intrusion detection system based on decision tree algorithm, Journal of Applied Artificial Intelligence 3 (1) (2022) 1-16." + } + ] + } + ], + "index": 39 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 755, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 755, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 755, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "text", + "content": "Page 19 of 21" + } + ] + } + ], + "index": 42 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 49, + 56, + 289, + 732 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 49, + 56, + 289, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 56, + 289, + 95 + ], + "spans": [ + { + "bbox": [ + 49, + 56, + 289, + 95 + ], + "type": "text", + "content": "[32] L. A. C. Ahakonye, C. I. Nwakanma, J.-M. Lee, D.-S. Kim, Scada intrusion detection scheme exploiting the fusion of modified decision tree and chi-square feature selection, Internet of Things 21 (2023) 100676." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 96, + 289, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 96, + 289, + 126 + ], + "spans": [ + { + "bbox": [ + 49, + 96, + 289, + 126 + ], + "type": "text", + "content": "[33] M. Hammad, N. Hewahi, W. Elmedany, Mmm-rf: A novel high accuracy multinomial mixture model for network intrusion detection systems, Computers & Security 120 (2022) 102777." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 126, + 288, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 126, + 288, + 165 + ], + "spans": [ + { + "bbox": [ + 49, + 126, + 288, + 165 + ], + "type": "text", + "content": "[34] K. Albulayhi, Q. Abu Al-Haija, S. A. Alsuhibany, A. A. Jillepalli, M. Ashrafuzzaman, F. T. Sheldon, Iot intrusion detection using machine learning with a novel high performing feature selection method, Applied Sciences 12 (10) (2022) 5015." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 166, + 288, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 166, + 288, + 195 + ], + "spans": [ + { + "bbox": [ + 49, + 166, + 288, + 195 + ], + "type": "text", + "content": "[35] H. Yang, S. Liang, J. Ni, H. Li, X. S. Shen, Secure and efficient km classification for industrial internet of things, IEEE Internet of Things Journal 7 (11) (2020) 10945-10954." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 197, + 288, + 235 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 197, + 288, + 235 + ], + "spans": [ + { + "bbox": [ + 49, + 197, + 288, + 235 + ], + "type": "text", + "content": "[36] A. D. Afifaturahman, M. Firmansyah, Perbandingan algorithm k-nearest neighbour (knn) dan naive bayes pada intrusion detection system (ids), Innovation in Research of Informatics (INNOVATICs) 3 (1) (2021)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 49, + 236, + 288, + 294 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 236, + 288, + 294 + ], + "spans": [ + { + "bbox": [ + 49, + 236, + 288, + 294 + ], + "type": "text", + "content": "[37] F. Z. Belgrana, N. Benamrane, M. A. Hamaida, A. M. Chaabani, A. Taleb-Ahmed, Network intrusion detection system using neural network and condensed nearest neighbors with selection of nsl-kdd influencing features, in: 2020 IEEE International Conference on Internet of Things and Intelligence System (IoTaIS), IEEE, 2021, pp. 23-29." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 49, + 295, + 288, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 295, + 288, + 334 + ], + "spans": [ + { + "bbox": [ + 49, + 295, + 288, + 334 + ], + "type": "text", + "content": "[38] Y. Yan, L. Qi, J. Wang, Y. Lin, L. Chen, A network intrusion detection method based on stacked autoencoder and LSTM, in: ICC 2020-2020 IEEE International Conference on Communications (ICC), IEEE, 2020, pp. 1-6." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 49, + 336, + 288, + 364 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 336, + 288, + 364 + ], + "spans": [ + { + "bbox": [ + 49, + 336, + 288, + 364 + ], + "type": "text", + "content": "[39] M. D. Hossain, H. Inoue, H. Ochiai, D. Fall, Y. Kadobayashi, Lstmbased intrusion detection system for in-vehicle can bus communications, IEEE Access 8 (2020) 185489-185502." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 49, + 365, + 288, + 394 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 365, + 288, + 394 + ], + "spans": [ + { + "bbox": [ + 49, + 365, + 288, + 394 + ], + "type": "text", + "content": "[40] A. El-Ghamry, A. Darwish, A. E. Hassanien, An optimized cnn-based intrusion detection system for reducing risks in smart farming, Internet of Things 22 (2023) 100709." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 49, + 395, + 288, + 434 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 395, + 288, + 434 + ], + "spans": [ + { + "bbox": [ + 49, + 395, + 288, + 434 + ], + "type": "text", + "content": "[41] S. Jamshidi, A. Nikanjam, M. A. Hamdaqa, F. Khomh, Attack detection by using deep learning for cyber-physical system, in: Artificial Intelligence for Cyber-Physical Systems Hardening, Springer, 2022, pp. 155–179." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 49, + 435, + 288, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 435, + 288, + 464 + ], + "spans": [ + { + "bbox": [ + 49, + 435, + 288, + 464 + ], + "type": "text", + "content": "[42] P. Sun, P. Liu, Q. Li, C. Liu, X. Lu, R. Hao, J. Chen, Dl-ids: Extracting features using cnn-lstm hybrid network for intrusion detection system, Security and communication networks 2020 (2020) 1–11." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 49, + 465, + 288, + 494 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 465, + 288, + 494 + ], + "spans": [ + { + "bbox": [ + 49, + 465, + 288, + 494 + ], + "type": "text", + "content": "[43] A. Halbouni, T. S. Gunawan, M. H. Habaebi, M. Halbouni, M. Kartiwi, R. Ahmad, Cnn-lstm: hybrid deep neural network for network intrusion detection system, IEEE Access 10 (2022) 99837-99849." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 49, + 494, + 288, + 523 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 494, + 288, + 523 + ], + "spans": [ + { + "bbox": [ + 49, + 494, + 288, + 523 + ], + "type": "text", + "content": "[44] D. Stiawan, M. Y. B. Idris, A. M. Bamhdi, R. Budiarto, et al., Cicids-2017 dataset feature analysis with information gain for anomaly detection, IEEE Access 8 (2020) 132911–132921." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 49, + 524, + 288, + 554 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 524, + 288, + 554 + ], + "spans": [ + { + "bbox": [ + 49, + 524, + 288, + 554 + ], + "type": "text", + "content": "[45] R. Panigrahi, S. Borah, A detailed analysis of cicids2017 dataset for designing intrusion detection systems, International Journal of Engineering & Technology 7 (3.24) (2018) 479-482." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 49, + 555, + 288, + 583 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 555, + 288, + 583 + ], + "spans": [ + { + "bbox": [ + 49, + 555, + 288, + 583 + ], + "type": "text", + "content": "[46] A. A. Alsulami, Q. Abu Al-Haija, A. Tayeb, A. Alqahtani, An intrusion detection and classification system for IoT traffic with improved data engineering, Applied Sciences 12 (23) (2022) 12336." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 49, + 584, + 288, + 613 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 584, + 288, + 613 + ], + "spans": [ + { + "bbox": [ + 49, + 584, + 288, + 613 + ], + "type": "text", + "content": "[47] L. Yang, A. Moubayed, I. Hamieh, A. Shami, Tree-based intelligent intrusion detection system in internet of vehicles, in: 2019 IEEE global communications conference (GLOBECOM), IEEE, 2019, pp. 1-6." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 49, + 614, + 288, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 614, + 288, + 633 + ], + "spans": [ + { + "bbox": [ + 49, + 614, + 288, + 633 + ], + "type": "text", + "content": "[48] Great Learning, Label encoding in python, [link], accessed: 2024-03-21 (n.d.)." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 49, + 634, + 288, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 634, + 288, + 653 + ], + "spans": [ + { + "bbox": [ + 49, + 634, + 288, + 653 + ], + "type": "text", + "content": "[49] Analytics Vidhya, Overcoming class imbalance using smote techniques, [link], accessed: 2024-03-21 (2020)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 49, + 654, + 288, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 654, + 288, + 693 + ], + "spans": [ + { + "bbox": [ + 49, + 654, + 288, + 693 + ], + "type": "text", + "content": "[50] T. N. Sainath, O. Vinyals, A. Senior, H. Sak, Convolutional, long short-term memory, fully connected deep neural networks, in: 2015 IEEE international conference on acoustics, speech and signal processing (ICASSP), IEEE, 2015, pp. 4580-4584." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 49, + 693, + 288, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 693, + 288, + 732 + ], + "spans": [ + { + "bbox": [ + 49, + 693, + 288, + 732 + ], + "type": "text", + "content": "[51] L. Muhammad, A. A. Haruna, U. S. Sharif, M. B. Mohammed, Cnn-lstm deep learning based forecasting model for Covid-19 infection cases in nigeria, south africa and botswana, Health and technology 12 (6) (2022) 1259–1276." + } + ] + } + ], + "index": 20 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 305, + 56, + 543, + 732 + ], + "type": "list", + "angle": 0, + "index": 44, + "blocks": [ + { + "bbox": [ + 305, + 56, + 543, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 56, + 543, + 95 + ], + "spans": [ + { + "bbox": [ + 305, + 56, + 543, + 95 + ], + "type": "text", + "content": "[52] L. Alzubaidi, J. Zhang, A. J. Humaidi, A. Al-Dujaili, Y. Duan, O. Al-Shamma, J. Santamaría, M. A. Fadhel, M. Al-Amidie, L. Farhan, Review of deep learning: concepts, cnn architectures, challenges, applications, future directions, Journal of big Data 8 (2021) 1-74." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 305, + 96, + 543, + 126 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 96, + 543, + 126 + ], + "spans": [ + { + "bbox": [ + 305, + 96, + 543, + 126 + ], + "type": "text", + "content": "[53] G. Najera-Gutierrez, J. A. Ansari, Web Penetration Testing with Kali Linux: Explore the methods and tools of ethical hacking with Kali Linux, Packt Publishing Ltd, 2018." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 305, + 126, + 543, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 126, + 543, + 165 + ], + "spans": [ + { + "bbox": [ + 305, + 126, + 543, + 165 + ], + "type": "text", + "content": "[54] S. Asadollahi, B. Goswami, M. Sameer, Ryu controller's scalability experiment on software defined networks, in: 2018 IEEE international conference on current trends in advanced computing (ICCTAC), IEEE, 2018, pp. 1-5." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 305, + 166, + 543, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 166, + 543, + 195 + ], + "spans": [ + { + "bbox": [ + 305, + 166, + 543, + 195 + ], + "type": "text", + "content": "[55] K. Kaur, J. Singh, N. S. Ghumman, Mininet as software defined networking testing platform, in: International conference on communication, computing & systems (ICCCS), 2014, pp. 139-42." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 305, + 196, + 543, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 196, + 543, + 215 + ], + "spans": [ + { + "bbox": [ + 305, + 196, + 543, + 215 + ], + "type": "text", + "content": "[56] L. St, S. Wold, et al., Analysis of variance (anova), Chemometrics and intelligent laboratory systems 6 (4) (1989) 259-272." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 305, + 216, + 543, + 264 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 216, + 543, + 264 + ], + "spans": [ + { + "bbox": [ + 305, + 216, + 543, + 264 + ], + "type": "text", + "content": "[57] D. Breitenbacher, I. Homoliak, Y. L. Aung, N. O. Tippenhauer, Y. Elovici, Hades-iot: A practical host-based anomaly detection system for iot devices, in: Proceedings of the 2019 ACM Asia conference on computer and communications security, 2019, pp. 479-484." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 305, + 265, + 543, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 265, + 543, + 305 + ], + "spans": [ + { + "bbox": [ + 305, + 265, + 543, + 305 + ], + "type": "text", + "content": "[58] B. Chen, Y. Zhang, G. Iosifidis, M. Liu, Reinforcement learning on computational resource allocation of cloud-based wireless networks, in: 2020 IEEE 6th World Forum on Internet of Things (WF-IoT), IEEE, 2020, pp. 1-6." + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 305, + 306, + 543, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 306, + 543, + 334 + ], + "spans": [ + { + "bbox": [ + 305, + 306, + 543, + 334 + ], + "type": "text", + "content": "[59] R. D. Corin, A. Costanzo, F. Callegati, D. Siracusa, Methods and techniques for dynamic deployability of software-defined security services, CoRR (2020)." + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 305, + 336, + 427, + 344 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 336, + 427, + 344 + ], + "spans": [ + { + "bbox": [ + 305, + 336, + 427, + 344 + ], + "type": "text", + "content": "[60] A. van de Ven, Powertop, [link]." + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 305, + 345, + 543, + 374 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 345, + 543, + 374 + ], + "spans": [ + { + "bbox": [ + 305, + 345, + 543, + 374 + ], + "type": "text", + "content": "[61] N. F. Syed, Z. Baig, A. Ibrahim, C. Valli, Denial of service attack detection through machine learning for the IoT, Journal of Information and Telecommunication 4 (4) (2020) 482-503." + } + ] + } + ], + "index": 31 + }, + { + "bbox": [ + 305, + 375, + 543, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 375, + 543, + 403 + ], + "spans": [ + { + "bbox": [ + 305, + 375, + 543, + 403 + ], + "type": "text", + "content": "[62] K. Sonar, H. Upadhyay, A survey: Ddos attack on internet of things, International Journal of Engineering Research and Development 10 (11) (2014) 58-63." 
+ } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 305, + 405, + 543, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 405, + 543, + 444 + ], + "spans": [ + { + "bbox": [ + 305, + 405, + 543, + 444 + ], + "type": "text", + "content": "[63] M. M. Raikar, S. Meena, Ssh brute force attack mitigation in internet of things (iot) network: An edge device security measure, in: 2021 2nd international conference on secure cyber computing and communications (ICSCCC), IEEE, 2021, pp. 72-77." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 305, + 444, + 543, + 484 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 444, + 543, + 484 + ], + "spans": [ + { + "bbox": [ + 305, + 444, + 543, + 484 + ], + "type": "text", + "content": "[64] Q. A. Al-Haija, E. Saleh, M. Alnabhan, Detecting port scan attacks using logistic regression, in: 2021 4th International symposium on advanced electrical and communication technologies (ISAECT), IEEE, 2021, pp. 1-5." + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 305, + 484, + 543, + 513 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 484, + 543, + 513 + ], + "spans": [ + { + "bbox": [ + 305, + 484, + 543, + 513 + ], + "type": "text", + "content": "[65] Z. Campbell, A. Bray, A. Ritz, A. Groce, Differentially private anova testing, in: 2018 1st International Conference on Data Intelligence and Security (ICDIS), IEEE, 2018, pp. 281-285." + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 305, + 514, + 543, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 514, + 543, + 533 + ], + "spans": [ + { + "bbox": [ + 305, + 514, + 543, + 533 + ], + "type": "text", + "content": "[66] H. Wei, X. Song, Smooth tests for normality in anova, arXiv preprint arXiv:2110.04849 (2021)." + } + ] + } + ], + "index": 36 + }, + { + "bbox": [ + 305, + 534, + 511, + 543 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 534, + 511, + 543 + ], + "spans": [ + { + "bbox": [ + 305, + 534, + 511, + 543 + ], + "type": "text", + "content": "[67] E. Frimpong, A performance study of the snort ids (2008)." + } + ] + } + ], + "index": 37 + }, + { + "bbox": [ + 305, + 543, + 543, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 543, + 543, + 593 + ], + "spans": [ + { + "bbox": [ + 305, + 543, + 543, + 593 + ], + "type": "text", + "content": "[68] D. Fadhilah, M. I. Marzuki, Performance analysis of ids snort and ids suricata with many-core processor in virtual machines against dos/ddos attacks, in: 2020 2nd International Conference on Broadband Communications, Wireless Sensors and Powering (BCWSP), IEEE, 2020, pp. 157-162." + } + ] + } + ], + "index": 38 + }, + { + "bbox": [ + 305, + 593, + 543, + 623 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 593, + 543, + 623 + ], + "spans": [ + { + "bbox": [ + 305, + 593, + 543, + 623 + ], + "type": "text", + "content": "[69] M. Hawedi, C. Talhi, H. Boucheneb, Multi-tenant intrusion detection system for public cloud (mtids), The Journal of Supercomputing 74 (2018) 5199–5230." + } + ] + } + ], + "index": 39 + }, + { + "bbox": [ + 305, + 624, + 543, + 653 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 624, + 543, + 653 + ], + "spans": [ + { + "bbox": [ + 305, + 624, + 543, + 653 + ], + "type": "text", + "content": "[70] S. M. Raza, J. Jeong, M. Kim, B. Kang, H. 
Choo, Empirical performance and energy consumption evaluation of container solutions on resource constrained IoT gateways, Sensors 21 (4) (2021) 1378." + } + ] + } + ], + "index": 40 + }, + { + "bbox": [ + 305, + 654, + 543, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 654, + 543, + 682 + ], + "spans": [ + { + "bbox": [ + 305, + 654, + 543, + 682 + ], + "type": "text", + "content": "[71] W. Park, S. Ahn, Performance comparison and detection analysis in snort and suricata environment, Wireless Personal Communications 94 (2017) 241-252." + } + ] + } + ], + "index": 41 + }, + { + "bbox": [ + 305, + 683, + 543, + 712 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 683, + 543, + 712 + ], + "spans": [ + { + "bbox": [ + 305, + 683, + 543, + 712 + ], + "type": "text", + "content": "[72] E. Ozturk Kiyak, B. Ghasemkhani, D. Birant, High-level k-nearest neighbors (hlknn): A supervised machine learning model for classification analysis, Electronics 12 (18) (2023) 3828." + } + ] + } + ], + "index": 42 + }, + { + "bbox": [ + 305, + 713, + 543, + 732 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 713, + 543, + 732 + ], + "spans": [ + { + "bbox": [ + 305, + 713, + 543, + 732 + ], + "type": "text", + "content": "[73] E. Altulaihan, M. A. Almaiah, A. Aljughaiman, Anomaly detection ids for detecting dos attacks in IoT networks based on machine learning" + } + ] + } + ], + "index": 43 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 755, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 755, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 755, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 45 + }, + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "text", + "content": "Page 20 of 21" + } + ] + } + ], + "index": 46 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 57, + 193, + 66 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 57, + 193, + 66 + ], + "spans": [ + { + "bbox": [ + 67, + 57, + 193, + 66 + ], + "type": "text", + "content": "algorithms, Sensors 24 (2) (2024) 713." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 49, + 67, + 289, + 156 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 49, + 67, + 289, + 86 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 67, + 289, + 86 + ], + "spans": [ + { + "bbox": [ + 49, + 67, + 289, + 86 + ], + "type": "text", + "content": "[74] U. Nations, United nations goals: Sustainable development, [link], accessed: September 3, 2024 (2023)." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 49, + 87, + 289, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 87, + 289, + 115 + ], + "spans": [ + { + "bbox": [ + 49, + 87, + 289, + 115 + ], + "type": "text", + "content": "[75] F. Khomh, S. A. Abtahizadeh, Understanding the impact of cloud patterns on performance and energy consumption, Journal of Systems and Software 141 (2018) 151-170." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 49, + 116, + 288, + 145 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 116, + 288, + 145 + ], + "spans": [ + { + "bbox": [ + 49, + 116, + 288, + 145 + ], + "type": "text", + "content": "[76] C. Wohlin, P. Runeson, M. Höst, M. C. Ohlsson, B. Regnell, A. Wesslén, Experimentation in software engineering, Springer Science & Business Media, 2012." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 49, + 146, + 201, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 146, + 201, + 156 + ], + "spans": [ + { + "bbox": [ + 49, + 146, + 201, + 156 + ], + "type": "text", + "content": "[77] S. Jamshidi, Replication packages, [link]." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 304, + 56, + 358, + 71 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 56, + 358, + 71 + ], + "spans": [ + { + "bbox": [ + 304, + 56, + 358, + 71 + ], + "type": "text", + "content": "Appendix" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 305, + 103, + 566, + 524 + ], + "blocks": [ + { + "bbox": [ + 332, + 82, + 516, + 94 + ], + "lines": [ + { + "bbox": [ + 332, + 82, + 516, + 94 + ], + "spans": [ + { + "bbox": [ + 332, + 82, + 516, + 94 + ], + "type": "text", + "content": "Table 13: Abbreviations used in this research." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 305, + 103, + 566, + 524 + ], + "lines": [ + { + "bbox": [ + 305, + 103, + 566, + 524 + ], + "spans": [ + { + "bbox": [ + 305, + 103, + 566, + 524 + ], + "type": "table", + "html": "
<table><tr><td>Abbreviation</td><td>Meaning</td></tr>
<tr><td>AI</td><td>Artificial Intelligence</td></tr>
<tr><td>ANOVA</td><td>Analysis of Variance</td></tr>
<tr><td>ANN</td><td>Artificial Neural Network</td></tr>
<tr><td>BT</td><td>Boosting Tree</td></tr>
<tr><td>CPU</td><td>Central Processing Unit</td></tr>
<tr><td>DAE</td><td>Deep Autoencoder</td></tr>
<tr><td>DDoS</td><td>Distributed Denial-of-Service</td></tr>
<tr><td>DL</td><td>Deep Learning</td></tr>
<tr><td>DoS</td><td>Denial-of-Service</td></tr>
<tr><td>DT</td><td>Decision Tree</td></tr>
<tr><td>GPU</td><td>Graphics Processing Unit</td></tr>
<tr><td>IDS</td><td>Intrusion Detection System</td></tr>
<tr><td>IoT</td><td>Internet of Things</td></tr>
<tr><td>KNN</td><td>K-Nearest Neighbor</td></tr>
<tr><td>LR</td><td>Logistic Regression</td></tr>
<tr><td>LSTM</td><td>Long Short-Term Memory</td></tr>
<tr><td>CNN</td><td>Convolutional Neural Network</td></tr>
<tr><td>MCU</td><td>Microcontroller Unit</td></tr>
<tr><td>MITM</td><td>Man-in-the-Middle</td></tr>
<tr><td>ML</td><td>Machine Learning</td></tr>
<tr><td>MTD</td><td>Moving Target Defense</td></tr>
<tr><td>NB</td><td>Naïve Bayes</td></tr>
<tr><td>R2L</td><td>Root to Local</td></tr>
<tr><td>RF</td><td>Random Forest</td></tr>
<tr><td>RNN</td><td>Recurrent Neural Network</td></tr>
<tr><td>SDN</td><td>Software-Defined Networking</td></tr>
<tr><td>SDPN</td><td>Stacked-Deep Polynomial Network</td></tr>
<tr><td>SMO</td><td>Spider Monkey Optimization</td></tr>
<tr><td>SMOTE</td><td>Synthetic Minority Oversampling Technique</td></tr>
<tr><td>SNN</td><td>Spiking Neural Network</td></tr>
<tr><td>SVM</td><td>Support Vector Machine</td></tr>
<tr><td>U2R</td><td>User to Root</td></tr>
<tr><td>WFEU</td><td>Wrapper Feature Extraction Unit</td></tr>
<tr><td>WSN</td><td>Wireless Sensor Network</td></tr></table>
", + "image_path": "80b826b2da3aaae86deb3b263a1de847b08d3842d3eb3476def4d337e1348324.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_body" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "spans": [ + { + "bbox": [ + 169, + 34, + 424, + 45 + ], + "type": "text", + "content": "Evaluating Machine Learning-driven Intrusion Detection System" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 49, + 755, + 247, + 766 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 49, + 755, + 247, + 766 + ], + "spans": [ + { + "bbox": [ + 49, + 755, + 247, + 766 + ], + "type": "text", + "content": "Saeid Jamshidi et al.: Preprint submitted to Elsevier" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "spans": [ + { + "bbox": [ + 487, + 755, + 543, + 766 + ], + "type": "text", + "content": "Page 21 of 21" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 595, + 793 + ], + "page_idx": 20 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_content_list.json b/data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..52f292cfa23be8a2af0f36bc57ce16d58a51a75c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_content_list.json @@ -0,0 +1,1210 @@ +[ + { + "type": "text", + "text": "TinyLLaVA-Video-R1: Towards Smaller LMMs for Video Reasoning", + "text_level": 1, + "bbox": [ + 186, + 121, + 816, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Xingjian Zhang $^{1,*}$ Siwei Wen $^{1,2,*}$ Wenjun Wu $^{1,2,3}$ Lei Huang $^{1,2,3,\\boxtimes}$", + "bbox": [ + 245, + 223, + 767, + 242 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ SKLCCSE, Institute of Artificial Intelligence, Beihang University, Beijing, China \n $^{2}$ Beijing Advanced Innovation Center for Future Blockchain and Privacy Computing, Beihang University \n $^{3}$ Hangzhou International Innovation Institute, Beihang University, Hangzhou, China", + "bbox": [ + 184, + 253, + 810, + 297 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "{huangleiai}@buaa.edu.cn", + "bbox": [ + 393, + 308, + 604, + 325 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 359, + 540, + 376 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, improving the reasoning ability of large multimodal models (LMMs) through reinforcement learning has made great progress. However, most existing works are based on highly reasoning-intensive datasets such as mathematics and code, and researchers generally choose large-scale models as the foundation. We argue that exploring small-scale models' reasoning capabilities remains valuable for researchers with limited computational resources. Moreover, enabling models to explain their reasoning processes on general question-answering datasets is equally meaningful. Therefore, we present the small-scale video reasoning model TinyLLaVA-Video-R1. 
Based on TinyLLaVA-Video [27], a traceably trained video understanding model with no more than 4B parameters, it not only demonstrates significantly improved reasoning and thinking capabilities after using reinforcement learning on general Video-QA datasets, but also exhibits the emergent characteristic of \"aha moments\". Furthermore, we share a series of experimental findings, aiming to provide practical insights for future exploration of video reasoning (thinking) abilities in small-scale models. It is available at https://github.com/ZhangXJ199/TinyLLaVA-Video-R1.", + "bbox": [ + 228, + 392, + 769, + 616 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 645, + 315, + 662 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Since DeepSeek-R1 [6] demonstrated that pure reinforcement learning can significantly enhance a model's reasoning capabilities, many subsequent works [1, 7, 15, 30, 16] have also explored improving the reasoning abilities of multimodal models, achieving notable progress. Most of these efforts focus on extending reasoning capabilities to the image modality [15, 1], conducting research using strong reasoning data such as math-image pairs and spatial reasoning [16, 7, 30], or task-specific data like grounding [14, 18]. However, existing research on video reasoning models has not made significant progress due to the scarcity of highly reasoning-intensive data.", + "bbox": [ + 169, + 676, + 826, + 777 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Open-R1-Video [21] is the first to introduce reasoning into the video domain, however, its performance on general Video-QA datasets is unsatisfactory, with benchmark results even showing a decline. Subsequently, Video-R1 [3] successfully integrates strong reasoning image-text pairs for video reasoning, achieving remarkable performance, but they make preliminary attempts and argue that small-scale models cannot produce effective reasoning processes. However, the high computational cost of large-scale models remains a significant barrier for many researchers with limited resources. Therefore, exploring the reasoning capabilities of smaller models is still necessary.", + "bbox": [ + 169, + 781, + 828, + 881 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09641v1 [cs.CV] 13 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "\\*denotes equal contributor; $\\boxtimes$ denotes corresponding author.", + "bbox": [ + 189, + 893, + 565, + 907 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg", + "image_caption": [ + "Figure 1: A case of TinyLLaVA-Video-R1 on video understanding data, sourced from MVBench. The model demonstrates the ability to perceive video scenes and analyze options, while also exhibiting reflective and backtracking behavior (highlighted in blue)." + ], + "image_footnote": [], + "bbox": [ + 186, + 99, + 810, + 178 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 195, + 210, + 215 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Question: What is the action performed by the person in the video? A. drop. B. sit down. C. pick up. D.squat down. 
Output the thinking process in and final answer (option) in tags.", + "bbox": [ + 212, + 191, + 790, + 220 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Owen2-VL-7B-Instruct:", + "text_level": 1, + "bbox": [ + 189, + 232, + 315, + 241 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": " The person in the video is seen bending down towards the floor. Given the context of the room and the action being performed, it is reasonable to infer that the person is picking something up from the floor. C. pick up ", + "bbox": [ + 183, + 241, + 792, + 268 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "TinyLLaVA-Video-SFT: C. pick up.", + "bbox": [ + 191, + 280, + 383, + 292 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "TinyLLaVA-Video-R1:", + "text_level": 1, + "bbox": [ + 189, + 305, + 303, + 313 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": " Let's start by analyzing the video. The video takes place in a room with white curtains, a green chair, a white table, and a white cabinet. The person is wearing a light green shirt, dark pants, and black shoes. The video begins with the person standing in the room, and then they start to move, bending down and picking up a colorful book or magazine from the floor. The video ends with the person standing up and holding the book or magazine. The question asks what the person is doing in the video. Let's look at the options: A. drop, B. sit down, C. pick up, D. squat down. Option A is incorrect because the person does not drop anything. Option B is incorrect because the person does not sit down. Option D is incorrect because the person does not squat down. Option C is correct because the person is picking up a colorful book or magazine from the floor. Let's confirm this by looking at the video again. The person is seen bending down and picking up a colorful book or magazine from the floor, and then standing up holding it. Therefore, the correct answer is C. C ", + "bbox": [ + 183, + 314, + 799, + 412 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, based on the traceably trained model TinyLLaVA-Video [27]. After reinforcement learning on general VideoQA datasets, the model not only significantly improves its reasoning and thinking abilities, but also exhibits the emergent characteristic of \"aha moments\", which is more meaningful than simply generating answers through perception. Moreover, through extensive experiments under various configurations, we have obtained a series of insightful findings. We believe these discoveries will provide valuable guidance for future exploration of video reasoning capabilities in small-scale models.", + "bbox": [ + 169, + 500, + 823, + 597 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 621, + 321, + 636 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large Language Reasoning Models. Recently, Kimi K1.5 [19] and DeepSeek-R1 [6] have gained widespread attention for their significant improvements in model performance on reasoning tasks through using reinforcement learning. Unlike approaches relying on process-supervised reward models [5], the Group Relative Policy Optimization (GRPO) algorithm [17] relying on rule-based rewards not only substantially reduces computational costs but has also sparked a new wave of interest due to intriguing phenomena such as \"aha moments\" observed during training. 
Under the influence of this research paradigm, numerous follow-up studies have advanced along this technical path, some have successfully replicated the method and extended it to multimodal domains [1, 7, 30], achieving notable progress, while others [13, 24] have focused on algorithmic optimization to further enhance the reasoning efficiency and performance of models.", + "bbox": [ + 169, + 655, + 826, + 794 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Multimodal Reasoning Models. With the remarkable advancement in reasoning capabilities of language models, an increasing number of studies have attempted to extend them to multimodal domains. LMM-R1 [16] proposes a two-stage training strategy to enhance the reasoning performance of multimodal models; Vision-R1 [7] attempts to address the post-cold-start overthinking issue in multimodal models; Video-R1 [3] develops T-GRPO to further enhance model's video comprehension. While these studies have made notable progress, most tend to adopt base models with 7B or more parameters to ensure superior reasoning effectiveness and robust performance.", + "bbox": [ + 169, + 814, + 826, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg", + "image_caption": [ + "Figure 2: A case of TinyLLaVA-Video-R1 on video reasoning data, sourced from MMVU. The model demonstrates comprehensive video content understanding and the capability to derive correct answers through analytical reasoning." + ], + "image_footnote": [], + "bbox": [ + 186, + 99, + 808, + 181 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 183, + 196, + 210, + 217 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Question: What is the treatment used in the video? A. Bone grafting. B. Hormone therapy. C. Chemotherapy. D. Photodynamic therapy. Output the thinking process in and final answer (option) in tags.", + "bbox": [ + 212, + 191, + 812, + 220 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Owen2-VL-7B-Instruct:", + "text_level": 1, + "bbox": [ + 189, + 233, + 315, + 242 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": " The video shows a medical procedure involving bone chips and fibrin glue, which are used to repair a damaged knee joint. This procedure is known as autologous bone grafting (ABG). Therefore, the treatment used in the video is bone grafting. A. Bone grafting. ", + "bbox": [ + 183, + 242, + 808, + 270 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "TinyLLaVA-Video-SFT: A.", + "text_level": 1, + "bbox": [ + 191, + 281, + 331, + 292 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "TinyLLaVA-Video-R1:", + "text_level": 1, + "bbox": [ + 189, + 306, + 303, + 314 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": " To determine the treatment for osteochondral lesions, I will first review the types of osteochondral lesions to focus on the focus. The different types of osteochondral lesions include superficial and severe articular cartilage injuries with or without bone fragmentation. After defining the type of osteochondral lesion, the next step is to identify the treatment for it. 
I will first consider the available options, such as bone grafting (A), hormone therapy (B), chemotherapy (C), photodynamic therapy (D), and radiation therapy (E). Looking at the video, I can see the focus on a patient with a severe articular cartilage injury and bone fragmentation. Based on our knowledge of cartilage and bone diseases, I will select the treatment A, which is bone grafting. The video will show a comparison between normal and severely damaged cartilage, illustrating the treatment of severe articular cartilage injury with bone grafting using a surgical tool to fill the gap. Bone grafting is the correct treatment for this type of osteochondral lesion. Therefore, I will select option A. A ", + "bbox": [ + 181, + 314, + 808, + 412 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Although some preliminary work has explored small-scale multimodal models [16, 1, 30], these investigations have primarily focused on the image modality, leaving research on video reasoning significantly underdeveloped. Due to the scarcity of high-quality video reasoning datasets and the inherent capability limitations of models, small-scale video reasoning models face multiple challenges including limited response length, difficulties in presenting coherent reasoning chains, and constrained overall performance improvement [3]. These critical challenges demand thorough investigation and substantive technical breakthroughs.", + "bbox": [ + 169, + 489, + 823, + 587 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Methods", + "text_level": 1, + "bbox": [ + 171, + 602, + 279, + 617 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To explore the video reasoning capabilities of small-scale models, we conduct experiments on TinyLLaVA-Video [27]. We utilize the GRPO algorithm on the general Video-QA dataset NextQA and made specific modifications to the reward rules: adding a continuous length reward to the format reward and introducing penalties for incorrect answers. The experimental results in Section 4 demonstrate the effectiveness of these modifications.", + "bbox": [ + 169, + 628, + 823, + 696 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 TinyLLaVA-Video", + "text_level": 1, + "bbox": [ + 171, + 709, + 341, + 724 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "TinyLLaVA-Video is a fully open-source small-scale video understanding model that employs Qwen2.5-3B [8] as its language model and SigLIP [25] as its visual encoder. It delivers competitive performance across multiple benchmarks. Crucially, its training data are fully open-sourced, and the entire training process remains traceable. This effectively prevents the repeated use of identical data across different training phases, thereby avoiding the introduction of uncontrolled variables and ensuring more reliable experimental results and conclusions. Such reproducibility and controllability represent a distinct advantage over models that only release weights, making TinyLLaVA-Video an ideal foundational model for our experiments on investigating video reasoning.", + "bbox": [ + 169, + 734, + 823, + 847 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Group Relative Policy Optimization (GRPO)", + "text_level": 1, + "bbox": [ + 171, + 857, + 524, + 872 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We follow the GRPO algorithm [17] to train the model. 
For each question $\\mathbf{q}$ , the policy model generates a set of candidate responses $\\{O_1, O_2, \\dots, O_G\\}$ , computes the corresponding rewards", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg", + "image_caption": [ + "Figure 3: Cases of \"aha moment\", where the model demonstrates reflection and backtracking during its reasoning process (highlighted in blue). The cases are from MVBench and MMVU respectively." + ], + "image_footnote": [], + "bbox": [ + 176, + 89, + 823, + 392 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "$\\{r_1, r_2, \\ldots, r_G\\}$ based on the reward rules. And then these rewards are normalized to calculate the advantage for each response. Subsequently, the model is optimized through maximization of the following objective function:", + "bbox": [ + 169, + 449, + 823, + 494 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nJ _ {G R P O} (\\theta) = \\mathbb {E} _ {[ q, \\{o _ {i} \\} ]} \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\left\\{\\min \\left[ \\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}} A _ {i}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i} \\right] - \\beta \\mathbb {D} _ {K L} [ \\pi_ {\\theta} \\| \\pi_ {r e f} ] \\right\\} \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 197, + 503, + 823, + 532 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\pi_{\\theta}$ and $\\pi_{\\theta_{old}}$ are the current and old policy, $\\epsilon$ and $\\beta$ are hyper-parameters, and $A_{i}$ is the advantages defined as:", + "bbox": [ + 169, + 541, + 823, + 570 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nA _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 375, + 574, + 823, + 608 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In addition, our experimental observations reveal an issue analogous to DAPO [24]: when all responses in a set $\\{O_i\\}$ are correct and given equal rewards, their computed advantages vanish to zero. This phenomenon affects policy updates and diminishes sample efficiency. To maximize the utility of each sample, we introduce an additional gaussian noise $\\mathcal{N}(0, 0.02^2)$ to the advantages. Although the noise induces only minor perturbations, it ensures intra-group advantage diversity across responses.", + "bbox": [ + 169, + 621, + 823, + 705 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Training Data and Template.", + "text_level": 1, + "bbox": [ + 171, + 724, + 415, + 739 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We select multiple choice questions from the NextQA [23] subset of LLaVA-Video-178K [28] as training data. To maintain manageable training time with limited computational resources, we only choose the subset of data with a duration of 0 to 30 seconds, which contains 5,496 samples. It is a weak reasoning dataset, where the questions are more perception-oriented and exhibit weaker logical reasoning. 
However, we hypothesize that the model's reasoning abilities are likely predominantly derived from reinforcement learning, and we still aim to guide it to demonstrate its thought process by articulating the reasoning behind its choices, rather than merely providing an answer.", + "bbox": [ + 169, + 752, + 823, + 851 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "During training, for each input question, in addition to the system template, we append the following prompt at the end: Output the thinking process in and final answer (option) in tags. Moreover, when computing rewards for responses, we strictly enforce the model to adhere to this format.", + "bbox": [ + 169, + 854, + 823, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "table", + "img_path": "images/35ec78c343f589aa72f0f2b4daca8a7ec35abc3ba14def0a6a833156be5653cd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
<table><tr><td>Model</td><td>LLM size</td><td>Answer Type</td><td>MVBench</td><td>Video-MME (wo sub)</td><td>MLVU</td><td>MMVU (mc)</td></tr>
<tr><td>LLaMA-VID [10]</td><td>7B</td><td>Option</td><td>41.4</td><td>-</td><td>33.2</td><td>-</td></tr>
<tr><td>LLaVA-NeXT [12]</td><td>7B</td><td>Option</td><td>-</td><td>-</td><td>39.3</td><td>29.2</td></tr>
<tr><td>VideoLLaVA [11]</td><td>7B</td><td>Option</td><td>-</td><td>39.9</td><td>47.3</td><td>-</td></tr>
<tr><td>ShareGPT4Video [2]</td><td>8B</td><td>Option</td><td>-</td><td>39.9</td><td>46.4</td><td>-</td></tr>
<tr><td>LLaVA-Mini [26]</td><td>7B</td><td>Option</td><td>44.5</td><td>-</td><td>42.8</td><td>-</td></tr>
<tr><td>InternVideo2 [22]</td><td>8B</td><td>Option</td><td>-</td><td>41.9</td><td>-</td><td>39.0</td></tr>
<tr><td>TinyLLaVA-Video-SFT</td><td>3B</td><td>Option</td><td>49.0</td><td>42.2</td><td>49.2</td><td>46.1</td></tr>
<tr><td>TinyLLaVA-Video-ColdStart</td><td>3B</td><td>Reason</td><td>33.2</td><td>26.6</td><td>28.6</td><td>22.7</td></tr>
<tr><td>TinyLLaVA-Video-R1</td><td>3B</td><td>Reason</td><td>49.5</td><td>46.6</td><td>52.4</td><td>46.9</td></tr></table>
", + "bbox": [ + 173, + 88, + 823, + 281 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 1: The performance of TinyLLaVA-Video-R1 on multiple benchmarks. \"Option\" indicates that the model only needs to answer with the selected choice, while \"Reason\" means the model must output both the answer and the reasoning process according to the format requirements. Here, MMVU is categorized as a video reasoning benchmark, the remaining benchmarks are designed for general-purpose video evaluation. The best results are indicated by boldface.", + "bbox": [ + 169, + 290, + 823, + 361 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.4 Reward Rules.", + "text_level": 1, + "bbox": [ + 171, + 382, + 315, + 396 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We also avoid using a reward model and define reward rules based on the format and accuracy of the responses as follows:", + "bbox": [ + 169, + 410, + 823, + 439 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Format reward. We require the thought process to be enclosed within $\\langle \\text{think} \\rangle \\langle \\text{/think} \\rangle$ , and the final answer to be enclosed within $\\langle \\text{answer} \\rangle \\langle \\text{/answer} \\rangle$ . These four tags can appear only once in the entire response, and if followed, the model will receive a format reward $FR = r_0 + LR$ . Here, $r_0$ represents the base reward for adhering to the required response format, and $LR$ is the continuous length reward designed to encourage the model to generate longer outputs, calculated as:", + "bbox": [ + 169, + 458, + 823, + 529 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nL R = \\min \\left(1, \\frac {L e n}{M L}\\right) \\times r _ {1}. \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 540, + 823, + 574 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Here, $Len$ represents the length of the response extracted from within the $<\\text{think}>$ / $<\\text{think}>$ tags, and $ML$ represents the maximum length corresponding to the upper limit of the reward. In our experiments, we set $r_0 = r_1 = 0.5$ , thus the format reward is limited to a maximum of 1.", + "bbox": [ + 169, + 585, + 823, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Accuracy reward. We design the accuracy reward $AR$ based on the answer. We extract the final answer from and compare it with the label. The model will receive an accuracy reward of $AR = r_2 > 0$ , if the answer is correct. Responses with either format errors preventing answer extraction or incorrect answers will result in zero accuracy reward, i.e. $AR = 0$ . To ensure that the accuracy reward and the format reward have equal importance, we set $r_2 = r_0 + r_1$ in our experiments.", + "bbox": [ + 169, + 647, + 823, + 732 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To encourage the model to increase the response length only when answering correctly, rather than arbitrarily increasing the length at the cost of accuracy, we deviate from most existing approaches that simply define the total reward as the sum of format reward and accuracy reward. 
Instead, we introduce a penalty for incorrect answers, with the total reward $R$ defined by the following formula:", + "bbox": [ + 169, + 750, + 823, + 808 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\nR = \\left\\{ \\begin{array}{l l} A R + F R, & \\text {i f} F R > 0 \\text {a n d} A R = r _ {2} \\\\ - F R, & \\text {i f} F R > 0 \\text {a n d} A R = 0 \\\\ - \\left(r _ {0} + r _ {1} + r _ {2}\\right), & \\text {i f} F R = 0 \\end{array} \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 323, + 820, + 823, + 871 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "When the model's answer is correct, the longer the reasoning process, the higher the reward. In contrast, if the answer is incorrect, the longer the reasoning process, the higher the penalty incurred.", + "bbox": [ + 169, + 883, + 823, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experiments", + "text_level": 1, + "bbox": [ + 171, + 89, + 313, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Experimental Settings", + "text_level": 1, + "bbox": [ + 171, + 126, + 369, + 142 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conduct experiments on 8 NVIDIA A100-40G GPUs. During training, we keep the vision encoder frozen and update the connector and language model. We set the learning rate at 1e-6 for stable training.", + "bbox": [ + 169, + 154, + 823, + 198 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To facilitate rapid adaptation to reasoning format and ensure training stability, we first finetune the model using 16 human-annotated cold-start samples, resulting in TinyLLaVA-Video-ColdStart. We then adopt it as the base model for reinforcement learning and train on 5,496 NextQA data for one epoch to obtain TinyLLaVA-Video-R1.", + "bbox": [ + 169, + 203, + 823, + 258 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "For evaluation, we select four commonly used video understanding and reasoning benchmarks: MVBench [9], VideoMME[4], MLVU [31], and MMVU [29]. These benchmarks encompass videos from multiple disciplines and domains, with a wide range of durations, enabling a comprehensive assessment of the model's capabilities.", + "bbox": [ + 169, + 265, + 825, + 321 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2 Main Results and Aha Moment", + "text_level": 1, + "bbox": [ + 171, + 347, + 433, + 361 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Figure 4, during training, both the response length and rewards demonstrate stable growth. As presented in Table 1, compared to TinyLLaVA-Video-SFT, which is trained on the same dataset using supervised learning, TinyLLaVA-Video-R1 shows superior performance across multiple benchmarks. Additionally, compared to the base model TinyLLaVA-Video-ColdStart, TinyLLaVA-Video-R1 not only adheres to the required response format but also demonstrates improved reasoning capabilities.", + "bbox": [ + 169, + 376, + 823, + 460 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg", + "image_caption": [ + "(a) Evolution in completion length." 
+ ], + "image_footnote": [], + "bbox": [ + 173, + 481, + 380, + 609 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg", + "image_caption": [ + "Figure 4: Evolution in key metrics during the training of TinyLLaVA-Video-R1. Under our reward rule settings, both the response length and rewards of TinyLLaVA-Video-R1 gradually increased during training." + ], + "image_footnote": [], + "bbox": [ + 395, + 481, + 602, + 609 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg", + "image_caption": [ + "(b) Evolution in accuracy reward.", + "(c) Evolution in format reward." + ], + "image_footnote": [], + "bbox": [ + 614, + 481, + 821, + 609 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As shown in Figures 1 and 2, we further illustrate the model's reasoning ability. The model can comprehend and analyze video content, evaluate each option step-by-step, and ultimately provide an answer. Compared to models that only output answers without reasoning, TinyLLaVA-Video-R1 generates meaningful thought processes, making its responses more interpretable and valuable. This represents a significant advantage of video reasoning models over conventional video understanding models.", + "bbox": [ + 169, + 704, + 823, + 787 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Similar to other works that use reinforcement learning to enhance model reasoning capabilities, we also reproduce the \"aha moment\" in TinyLLaVA-Video-R1, where the model exhibits emergent behaviors such as self-verification during its reasoning process. Our experimental results confirm that even when trained with weakly-reasoned general video data through reinforcement learning, the smaller model can still demonstrate retrospection and reflection.", + "bbox": [ + 169, + 792, + 823, + 864 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "As highlighted in the blue annotations in Figures 1 and 3, the model revisits and verifies its initial reasoning after completing a round of thought. This behavior indicates that the model does not merely perform perception but also engages in continuous thinking and self-checking.", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.3 Ablation Study", + "text_level": 1, + "bbox": [ + 171, + 90, + 318, + 104 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we present ablation studies on methods and key experimental findings that contribute significantly to the performance enhancement of TinyLLaVA-Video-R1.", + "bbox": [ + 169, + 116, + 823, + 146 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.1 Impact of Cold-Start Data", + "text_level": 1, + "bbox": [ + 169, + 159, + 410, + 174 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Due to the limitations of language models, when we directly use TinyLLaVA-Video as the base model without length reward, we find that as training progresses, the model has a certain probability of learning to 'take shortcuts'. While adhering to the required format, all responses omit the reasoning process and are structured strictly as option . 
We observe similar experimental phenomena when conducting experiments on Qwen2-VL-2B [20], so we believe this is a common issue with small-scale models.", + "bbox": [ + 169, + 183, + 823, + 267 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "However, when we perform a cold start with 16 human-annotated CoT data, this phenomenon no longer appear during the experiments. At the same time, the model also learn to comply with the format requirements more quickly. Therefore, we believe that cold starting is necessary for reasoning in small-scale models. Even a small amount of cold start data can be very helpful for stabilizing model training.", + "bbox": [ + 169, + 272, + 825, + 343 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.2 Impact of Refinement of Format Rewards", + "text_level": 1, + "bbox": [ + 169, + 356, + 516, + 371 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In our initial experiments, similar to other works, we only apply format reward without incorporating continuous length reward. However, constrained by the capabilities of small-scale language models, training under this setup does not lead to an increase in response length, and even results in a slight decline. After introducing continuous length reward, the model's response length significantly increases during training, as shown in Figure 5. However, we observe that under this setup, the model engages in some meaningless reasoning to increase response length, which does not improve performance and even leads to a significant increase in training time. When incorporating answer correctness penalty into the total reward as described in Section 3.4, we observe both qualitative improvements in model responses and continued growth in output length and rewards throughout training as shown in Figure 4.", + "bbox": [ + 169, + 380, + 517, + 630 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg", + "image_caption": [ + "Figure 5: The variation in response length during training under different settings." + ], + "image_footnote": [], + "bbox": [ + 532, + 383, + 821, + 563 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3.3 Other Experimental Explorations", + "text_level": 1, + "bbox": [ + 169, + 643, + 460, + 657 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Meanwhile, we also experiment with some existing improvements to GRPO. Some studies [24, 13] suggest that the distribution of reasoning models may differ significantly from the initial model, so removing the KL divergence can eliminate constraints on the model. As shown in Figure 6, our experiments similarly demonstrate that eliminating the KL divergence improves model performance. Additionally, Dr. GRPO [13] argues that the increase in response length may also stem from inherent biases in the GRPO objective function. After removing the KL divergence, we further exclude the response length term from the objective function and the reward variance term from the advantage calculation. As shown in Figure 6, the performance of the model improves again. At the same time, we observe a noticeable reduction in response length, the model tends to only provide descriptions of the video content while omitting analysis of the answer. 
We attribute this to the lack of strong reasoning in the training dataset, which fails to stimulate deep logical reasoning in the models.", + "bbox": [ + 169, + 667, + 826, + 820 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5 Conclusion and Future Work", + "text_level": 1, + "bbox": [ + 169, + 838, + 450, + 854 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, which is trained using reinforcement learning on a general Video-QA dataset. It not only significantly enhances reasoning and thinking capabilities, but also exhibits the emergent characteristic of \"aha", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg", + "image_caption": [ + "Figure 6: Ablation study on TinyLLaVA-R1 variants across multiple benchmarks. We compare the original TinyLLaVA-Video-R1 with two ablated versions: removing the KL divergence term (Del KL) and replacing the original GRPO with Dr. GRPO. Results are reported on MVBench, Video-MME (without subtitle input), MLVU, and MMVU (multiple-choice subset). Bold values indicate the best performance for each benchmark." + ], + "image_footnote": [], + "bbox": [ + 178, + 93, + 821, + 334 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "moment\". Additionally, we present a series of experimental findings, hoping this work will provide valuable insights for future practitioners exploring the video reasoning abilities of small-scale models. We will further investigate small-scale video reasoning models, with potential future directions as follows:", + "bbox": [ + 169, + 431, + 823, + 487 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Introducing high-quality video reasoning data. Currently, TinyLLaVA-Video-R1 is trained only on general video question-answering data. We aim to explore the upper limits of the model's reasoning capabilities by introducing higher-quality video reasoning data.", + "- Improving reinforcement learning algorithms. Currently, TinyLLaVA-Video-R1 employs the GRPO algorithm for training. However, this approach exhibits notable limitations. To enhance its effectiveness in video reasoning tasks, we plan to refine the algorithm by addressing the key challenges observed in our experiment." + ], + "bbox": [ + 215, + 500, + 823, + 604 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Acknowledgment. This work was partially supported by the National Science and Technology Major Project (Grant No. 2022ZD0116310), National Natural Science Foundation of China (Grant No. 62476016), the Fundamental Research Funds for the Central Universities.", + "bbox": [ + 169, + 619, + 823, + 672 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 693, + 267, + 708 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02. 1, 2, 3", + "[2] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. 
Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024. 5", + "[3] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. 1, 2, 3", + "[4] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 6" + ], + "bbox": [ + 179, + 715, + 825, + 911 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[5] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. 2", + "[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 2", + "[7] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 1, 2", + "[8] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.3", + "[9] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mybench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 6", + "[10] Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In European Conference on Computer Vision, pages 323–340. Springer, 2025. 5", + "[11] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023. 5", + "[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llavanext: Improved reasoning,OCR, and world knowledge, 2024.5", + "[13] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025. 2, 7", + "[14] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. 1", + "[15] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. 1", + "[16] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. 
Lmm-r1: Empowering 3b Imms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 1, 2, 3", + "[17] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 2, 3", + "[18] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15. 1", + "[19] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 2", + "[20] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7", + "[21] Xiaodong Wang and Peixi Peng. Open-r1-video. https://github.com/Wang-Xiaodong1899/Open-R1-Video, 2025.1", + "[22] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Intervideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024. 5", + "[23] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021. 4" + ], + "bbox": [ + 171, + 90, + 883, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[24] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. 2, 4, 7", + "[25] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 3", + "[26] Shaolei Zhang, Qingkai Fang, Zhe Yang, and Yang Feng. Llava-mini: Efficient image and video large multimodal models with one vision token. arXiv preprint arXiv:2501.03895, 2025. 5", + "[27] Xingjian Zhang, Xi Weng, Yihao Yue, Zhaoxin Fan, Wenjun Wu, and Lei Huang. Tinyllava-video: A simple framework of small-scale large multimodal models for video understanding. arXiv preprint arXiv:2501.15513, 2025. 1, 2, 3", + "[28] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024. 4", + "[29] Yilun Zhao, Lujing Xie, Haowei Zhang, Guo Gan, Yitao Long, Zhiyuan Hu, Tongyan Hu, Weiyuan Chen, Chuhan Li, Junyang Song, et al. Mmvu: Measuring expert-level multi-discipline video understanding. arXiv preprint arXiv:2501.12380, 2025. 6", + "[30] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025. 
1, 2, 3", + "[31] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 6" + ], + "bbox": [ + 171, + 90, + 825, + 428 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_model.json b/data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_model.json new file mode 100644 index 0000000000000000000000000000000000000000..d91be97a0cd8732863f861f2b5b29f8d804b0234 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_model.json @@ -0,0 +1,1584 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.09641v1 [cs.CV] 13 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.187, + 0.122, + 0.817, + 0.175 + ], + "angle": 0, + "content": "TinyLLaVA-Video-R1: Towards Smaller LMMs for Video Reasoning" + }, + { + "type": "text", + "bbox": [ + 0.246, + 0.224, + 0.768, + 0.243 + ], + "angle": 0, + "content": "Xingjian Zhang\\(^{1,*}\\) Siwei Wen\\(^{1,2,*}\\) Wenjun Wu\\(^{1,2,3}\\) Lei Huang\\(^{1,2,3,\\boxtimes}\\)" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.254, + 0.812, + 0.298 + ], + "angle": 0, + "content": "\\(^{1}\\)SKLCCSE, Institute of Artificial Intelligence, Beihang University, Beijing, China \n\\(^{2}\\)Beijing Advanced Innovation Center for Future Blockchain and Privacy Computing, Beihang University \n\\(^{3}\\)Hangzhou International Innovation Institute, Beihang University, Hangzhou, China" + }, + { + "type": "text", + "bbox": [ + 0.394, + 0.309, + 0.605, + 0.326 + ], + "angle": 0, + "content": "{huangleiai}@buaa.edu.cn" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.36, + 0.542, + 0.377 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.393, + 0.77, + 0.617 + ], + "angle": 0, + "content": "Recently, improving the reasoning ability of large multimodal models (LMMs) through reinforcement learning has made great progress. However, most existing works are based on highly reasoning-intensive datasets such as mathematics and code, and researchers generally choose large-scale models as the foundation. We argue that exploring small-scale models' reasoning capabilities remains valuable for researchers with limited computational resources. Moreover, enabling models to explain their reasoning processes on general question-answering datasets is equally meaningful. Therefore, we present the small-scale video reasoning model TinyLLaVA-Video-R1. Based on TinyLLaVA-Video [27], a traceably trained video understanding model with no more than 4B parameters, it not only demonstrates significantly improved reasoning and thinking capabilities after using reinforcement learning on general Video-QA datasets, but also exhibits the emergent characteristic of \"aha moments\". Furthermore, we share a series of experimental findings, aiming to provide practical insights for future exploration of video reasoning (thinking) abilities in small-scale models. It is available at https://github.com/ZhangXJ199/TinyLLaVA-Video-R1." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.646, + 0.316, + 0.663 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.678, + 0.827, + 0.779 + ], + "angle": 0, + "content": "Since DeepSeek-R1 [6] demonstrated that pure reinforcement learning can significantly enhance a model's reasoning capabilities, many subsequent works [1, 7, 15, 30, 16] have also explored improving the reasoning abilities of multimodal models, achieving notable progress. Most of these efforts focus on extending reasoning capabilities to the image modality [15, 1], conducting research using strong reasoning data such as math-image pairs and spatial reasoning [16, 7, 30], or task-specific data like grounding [14, 18]. However, existing research on video reasoning models has not made significant progress due to the scarcity of highly reasoning-intensive data." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.782, + 0.829, + 0.882 + ], + "angle": 0, + "content": "Open-R1-Video [21] is the first to introduce reasoning into the video domain, however, its performance on general Video-QA datasets is unsatisfactory, with benchmark results even showing a decline. Subsequently, Video-R1 [3] successfully integrates strong reasoning image-text pairs for video reasoning, achieving remarkable performance, but they make preliminary attempts and argue that small-scale models cannot produce effective reasoning processes. However, the high computational cost of large-scale models remains a significant barrier for many researchers with limited resources. Therefore, exploring the reasoning capabilities of smaller models is still necessary." + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.894, + 0.566, + 0.909 + ], + "angle": 0, + "content": "\\*denotes equal contributor; \\(\\boxtimes\\) denotes corresponding author." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.187, + 0.1, + 0.812, + 0.179 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.183, + 0.196, + 0.212, + 0.217 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.192, + 0.792, + 0.221 + ], + "angle": 0, + "content": "Question: What is the action performed by the person in the video? A. drop. B. sit down. C. pick up. D.squat down. Output the thinking process in and final answer (option) in tags." + }, + { + "type": "title", + "bbox": [ + 0.19, + 0.233, + 0.316, + 0.242 + ], + "angle": 0, + "content": "Owen2-VL-7B-Instruct:" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.242, + 0.794, + 0.27 + ], + "angle": 0, + "content": " The person in the video is seen bending down towards the floor. Given the context of the room and the action being performed, it is reasonable to infer that the person is picking something up from the floor. C. pick up " + }, + { + "type": "text", + "bbox": [ + 0.192, + 0.281, + 0.385, + 0.293 + ], + "angle": 0, + "content": "TinyLLaVA-Video-SFT: C. pick up." + }, + { + "type": "title", + "bbox": [ + 0.191, + 0.306, + 0.304, + 0.314 + ], + "angle": 0, + "content": "TinyLLaVA-Video-R1:" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.315, + 0.8, + 0.414 + ], + "angle": 0, + "content": " Let's start by analyzing the video. The video takes place in a room with white curtains, a green chair, a white table, and a white cabinet. The person is wearing a light green shirt, dark pants, and black shoes. 
The video begins with the person standing in the room, and then they start to move, bending down and picking up a colorful book or magazine from the floor. The video ends with the person standing up and holding the book or magazine. The question asks what the person is doing in the video. Let's look at the options: A. drop, B. sit down, C. pick up, D. squat down. Option A is incorrect because the person does not drop anything. Option B is incorrect because the person does not sit down. Option D is incorrect because the person does not squat down. Option C is correct because the person is picking up a colorful book or magazine from the floor. Let's confirm this by looking at the video again. The person is seen bending down and picking up a colorful book or magazine from the floor, and then standing up holding it. Therefore, the correct answer is C. C " + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.435, + 0.825, + 0.478 + ], + "angle": 0, + "content": "Figure 1: A case of TinyLLaVA-Video-R1 on video understanding data, sourced from MVBench. The model demonstrates the ability to perceive video scenes and analyze options, while also exhibiting reflective and backtracking behavior (highlighted in blue)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.5, + 0.825, + 0.598 + ], + "angle": 0, + "content": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, based on the traceably trained model TinyLLaVA-Video [27]. After reinforcement learning on general VideoQA datasets, the model not only significantly improves its reasoning and thinking abilities, but also exhibits the emergent characteristic of \"aha moments\", which is more meaningful than simply generating answers through perception. Moreover, through extensive experiments under various configurations, we have obtained a series of insightful findings. We believe these discoveries will provide valuable guidance for future exploration of video reasoning capabilities in small-scale models." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.622, + 0.323, + 0.637 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.656, + 0.827, + 0.795 + ], + "angle": 0, + "content": "Large Language Reasoning Models. Recently, Kimi K1.5 [19] and DeepSeek-R1 [6] have gained widespread attention for their significant improvements in model performance on reasoning tasks through using reinforcement learning. Unlike approaches relying on process-supervised reward models [5], the Group Relative Policy Optimization (GRPO) algorithm [17] relying on rule-based rewards not only substantially reduces computational costs but has also sparked a new wave of interest due to intriguing phenomena such as \"aha moments\" observed during training. Under the influence of this research paradigm, numerous follow-up studies have advanced along this technical path, some have successfully replicated the method and extended it to multimodal domains [1, 7, 30], achieving notable progress, while others [13, 24] have focused on algorithmic optimization to further enhance the reasoning efficiency and performance of models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Multimodal Reasoning Models. With the remarkable advancement in reasoning capabilities of language models, an increasing number of studies have attempted to extend them to multimodal domains. 
LMM-R1 [16] proposes a two-stage training strategy to enhance the reasoning performance of multimodal models; Vision-R1 [7] attempts to address the post-cold-start overthinking issue in multimodal models; Video-R1 [3] develops T-GRPO to further enhance model's video comprehension. While these studies have made notable progress, most tend to adopt base models with 7B or more parameters to ensure superior reasoning effectiveness and robust performance." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.187, + 0.1, + 0.81, + 0.182 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.184, + 0.197, + 0.212, + 0.218 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.193, + 0.813, + 0.222 + ], + "angle": 0, + "content": "Question: What is the treatment used in the video? A. Bone grafting. B. Hormone therapy. C. Chemotherapy. D. Photodynamic therapy. Output the thinking process in and final answer (option) in tags." + }, + { + "type": "title", + "bbox": [ + 0.19, + 0.234, + 0.316, + 0.243 + ], + "angle": 0, + "content": "Owen2-VL-7B-Instruct:" + }, + { + "type": "text", + "bbox": [ + 0.184, + 0.243, + 0.81, + 0.271 + ], + "angle": 0, + "content": " The video shows a medical procedure involving bone chips and fibrin glue, which are used to repair a damaged knee joint. This procedure is known as autologous bone grafting (ABG). Therefore, the treatment used in the video is bone grafting. A. Bone grafting. " + }, + { + "type": "title", + "bbox": [ + 0.192, + 0.282, + 0.333, + 0.293 + ], + "angle": 0, + "content": "TinyLLaVA-Video-SFT: A." + }, + { + "type": "title", + "bbox": [ + 0.191, + 0.307, + 0.304, + 0.315 + ], + "angle": 0, + "content": "TinyLLaVA-Video-R1:" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.315, + 0.81, + 0.414 + ], + "angle": 0, + "content": " To determine the treatment for osteochondral lesions, I will first review the types of osteochondral lesions to focus on the focus. The different types of osteochondral lesions include superficial and severe articular cartilage injuries with or without bone fragmentation. After defining the type of osteochondral lesion, the next step is to identify the treatment for it. I will first consider the available options, such as bone grafting (A), hormone therapy (B), chemotherapy (C), photodynamic therapy (D), and radiation therapy (E). Looking at the video, I can see the focus on a patient with a severe articular cartilage injury and bone fragmentation. Based on our knowledge of cartilage and bone diseases, I will select the treatment A, which is bone grafting. The video will show a comparison between normal and severely damaged cartilage, illustrating the treatment of severe articular cartilage injury with bone grafting using a surgical tool to fill the gap. Bone grafting is the correct treatment for this type of osteochondral lesion. Therefore, I will select option A. A " + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.436, + 0.825, + 0.478 + ], + "angle": 0, + "content": "Figure 2: A case of TinyLLaVA-Video-R1 on video reasoning data, sourced from MMVU. The model demonstrates comprehensive video content understanding and the capability to derive correct answers through analytical reasoning." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.491, + 0.825, + 0.588 + ], + "angle": 0, + "content": "Although some preliminary work has explored small-scale multimodal models [16, 1, 30], these investigations have primarily focused on the image modality, leaving research on video reasoning significantly underdeveloped. Due to the scarcity of high-quality video reasoning datasets and the inherent capability limitations of models, small-scale video reasoning models face multiple challenges including limited response length, difficulties in presenting coherent reasoning chains, and constrained overall performance improvement [3]. These critical challenges demand thorough investigation and substantive technical breakthroughs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.603, + 0.28, + 0.618 + ], + "angle": 0, + "content": "3 Methods" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.63, + 0.825, + 0.698 + ], + "angle": 0, + "content": "To explore the video reasoning capabilities of small-scale models, we conduct experiments on TinyLLaVA-Video [27]. We utilize the GRPO algorithm on the general Video-QA dataset NextQA and made specific modifications to the reward rules: adding a continuous length reward to the format reward and introducing penalties for incorrect answers. The experimental results in Section 4 demonstrate the effectiveness of these modifications." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.71, + 0.343, + 0.725 + ], + "angle": 0, + "content": "3.1 TinyLLaVA-Video" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.735, + 0.825, + 0.848 + ], + "angle": 0, + "content": "TinyLLaVA-Video is a fully open-source small-scale video understanding model that employs Qwen2.5-3B [8] as its language model and SigLIP [25] as its visual encoder. It delivers competitive performance across multiple benchmarks. Crucially, its training data are fully open-sourced, and the entire training process remains traceable. This effectively prevents the repeated use of identical data across different training phases, thereby avoiding the introduction of uncontrolled variables and ensuring more reliable experimental results and conclusions. Such reproducibility and controllability represent a distinct advantage over models that only release weights, making TinyLLaVA-Video an ideal foundational model for our experiments on investigating video reasoning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.858, + 0.526, + 0.873 + ], + "angle": 0, + "content": "3.2 Group Relative Policy Optimization (GRPO)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "We follow the GRPO algorithm [17] to train the model. For each question \\( \\mathbf{q} \\), the policy model generates a set of candidate responses \\( \\{O_1, O_2, \\dots, O_G\\} \\), computes the corresponding rewards" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.09, + 0.824, + 0.393 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.401, + 0.825, + 0.43 + ], + "angle": 0, + "content": "Figure 3: Cases of \"aha moment\", where the model demonstrates reflection and backtracking during its reasoning process (highlighted in blue). The cases are from MVBench and MMVU respectively." 
+ }, + { + "type": "text", + "bbox": [ + 0.171, + 0.45, + 0.825, + 0.495 + ], + "angle": 0, + "content": "\\(\\{r_1, r_2, \\ldots, r_G\\}\\) based on the reward rules. And then these rewards are normalized to calculate the advantage for each response. Subsequently, the model is optimized through maximization of the following objective function:" + }, + { + "type": "equation", + "bbox": [ + 0.198, + 0.504, + 0.825, + 0.533 + ], + "angle": 0, + "content": "\\[\nJ _ {G R P O} (\\theta) = \\mathbb {E} _ {[ q, \\{o _ {i} \\} ]} \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\left\\{\\min \\left[ \\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}} A _ {i}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i} \\right] - \\beta \\mathbb {D} _ {K L} [ \\pi_ {\\theta} \\| \\pi_ {r e f} ] \\right\\} \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.542, + 0.825, + 0.571 + ], + "angle": 0, + "content": "where \\(\\pi_{\\theta}\\) and \\(\\pi_{\\theta_{old}}\\) are the current and old policy, \\(\\epsilon\\) and \\(\\beta\\) are hyper-parameters, and \\(A_{i}\\) is the advantages defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.376, + 0.575, + 0.825, + 0.609 + ], + "angle": 0, + "content": "\\[\nA _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.622, + 0.825, + 0.707 + ], + "angle": 0, + "content": "In addition, our experimental observations reveal an issue analogous to DAPO [24]: when all responses in a set \\(\\{O_i\\}\\) are correct and given equal rewards, their computed advantages vanish to zero. This phenomenon affects policy updates and diminishes sample efficiency. To maximize the utility of each sample, we introduce an additional gaussian noise \\(\\mathcal{N}(0, 0.02^2)\\) to the advantages. Although the noise induces only minor perturbations, it ensures intra-group advantage diversity across responses." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.726, + 0.416, + 0.741 + ], + "angle": 0, + "content": "3.3 Training Data and Template." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.825, + 0.852 + ], + "angle": 0, + "content": "We select multiple choice questions from the NextQA [23] subset of LLaVA-Video-178K [28] as training data. To maintain manageable training time with limited computational resources, we only choose the subset of data with a duration of 0 to 30 seconds, which contains 5,496 samples. It is a weak reasoning dataset, where the questions are more perception-oriented and exhibit weaker logical reasoning. However, we hypothesize that the model's reasoning abilities are likely predominantly derived from reinforcement learning, and we still aim to guide it to demonstrate its thought process by articulating the reasoning behind its choices, rather than merely providing an answer." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.825, + 0.913 + ], + "angle": 0, + "content": "During training, for each input question, in addition to the system template, we append the following prompt at the end: Output the thinking process in and final answer (option) in tags. Moreover, when computing rewards for responses, we strictly enforce the model to adhere to this format." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.174, + 0.089, + 0.825, + 0.282 + ], + "angle": 0, + "content": "
ModelLLM sizeAnswer TypeMVBenchVideo-MME (wo sub)MLVUMMVU (mc)
LLaMA-VID [10]7BOption41.4-33.2-
LLaVA-NeXT [12]7BOption--39.329.2
VideoLLaVA [11]7BOption-39.947.3-
ShareGPT4Video [2]8BOption-39.946.4-
LLaVA-Mini [26]7BOption44.5-42.8-
InternVideo2 [22]8BOption-41.9-39.0
TinyLLaVA-Video-SFT3BOption49.042.249.246.1
TinyLLaVA-Video-ColdStart3BReason33.226.628.622.7
TinyLLaVA-Video-R13BReason49.546.652.446.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.291, + 0.825, + 0.362 + ], + "angle": 0, + "content": "Table 1: The performance of TinyLLaVA-Video-R1 on multiple benchmarks. \"Option\" indicates that the model only needs to answer with the selected choice, while \"Reason\" means the model must output both the answer and the reasoning process according to the format requirements. Here, MMVU is categorized as a video reasoning benchmark, the remaining benchmarks are designed for general-purpose video evaluation. The best results are indicated by boldface." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.383, + 0.316, + 0.397 + ], + "angle": 0, + "content": "3.4 Reward Rules." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.411, + 0.825, + 0.44 + ], + "angle": 0, + "content": "We also avoid using a reward model and define reward rules based on the format and accuracy of the responses as follows:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.459, + 0.825, + 0.53 + ], + "angle": 0, + "content": "Format reward. We require the thought process to be enclosed within \\( \\langle \\text{think} \\rangle \\langle \\text{/think} \\rangle \\), and the final answer to be enclosed within \\( \\langle \\text{answer} \\rangle \\langle \\text{/answer} \\rangle \\). These four tags can appear only once in the entire response, and if followed, the model will receive a format reward \\( FR = r_0 + LR \\). Here, \\( r_0 \\) represents the base reward for adhering to the required response format, and \\( LR \\) is the continuous length reward designed to encourage the model to generate longer outputs, calculated as:" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.541, + 0.825, + 0.575 + ], + "angle": 0, + "content": "\\[\nL R = \\min \\left(1, \\frac {L e n}{M L}\\right) \\times r _ {1}. \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.586, + 0.825, + 0.629 + ], + "angle": 0, + "content": "Here, \\( Len \\) represents the length of the response extracted from within the \\( <\\text{think}> \\)/\\( <\\text{think}> \\) tags, and \\( ML \\) represents the maximum length corresponding to the upper limit of the reward. In our experiments, we set \\( r_0 = r_1 = 0.5 \\), thus the format reward is limited to a maximum of 1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.648, + 0.825, + 0.733 + ], + "angle": 0, + "content": "Accuracy reward. We design the accuracy reward \\( AR \\) based on the answer. We extract the final answer from and compare it with the label. The model will receive an accuracy reward of \\( AR = r_2 > 0 \\), if the answer is correct. Responses with either format errors preventing answer extraction or incorrect answers will result in zero accuracy reward, i.e. \\( AR = 0 \\). To ensure that the accuracy reward and the format reward have equal importance, we set \\( r_2 = r_0 + r_1 \\) in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.751, + 0.825, + 0.809 + ], + "angle": 0, + "content": "To encourage the model to increase the response length only when answering correctly, rather than arbitrarily increasing the length at the cost of accuracy, we deviate from most existing approaches that simply define the total reward as the sum of format reward and accuracy reward. 
Instead, we introduce a penalty for incorrect answers, with the total reward \\( R \\) defined by the following formula:" + }, + { + "type": "equation", + "bbox": [ + 0.324, + 0.821, + 0.825, + 0.872 + ], + "angle": 0, + "content": "\\[\nR = \\left\\{ \\begin{array}{l l} A R + F R, & \\text {i f} F R > 0 \\text {a n d} A R = r _ {2} \\\\ - F R, & \\text {i f} F R > 0 \\text {a n d} A R = 0 \\\\ - \\left(r _ {0} + r _ {1} + r _ {2}\\right), & \\text {i f} F R = 0 \\end{array} \\right. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "When the model's answer is correct, the longer the reasoning process, the higher the reward. In contrast, if the answer is incorrect, the longer the reasoning process, the higher the penalty incurred." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.314, + 0.108 + ], + "angle": 0, + "content": "4 Experiments" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.127, + 0.37, + 0.143 + ], + "angle": 0, + "content": "4.1 Experimental Settings" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.155, + 0.825, + 0.199 + ], + "angle": 0, + "content": "We conduct experiments on 8 NVIDIA A100-40G GPUs. During training, we keep the vision encoder frozen and update the connector and language model. We set the learning rate at 1e-6 for stable training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.204, + 0.825, + 0.26 + ], + "angle": 0, + "content": "To facilitate rapid adaptation to reasoning format and ensure training stability, we first finetune the model using 16 human-annotated cold-start samples, resulting in TinyLLaVA-Video-ColdStart. We then adopt it as the base model for reinforcement learning and train on 5,496 NextQA data for one epoch to obtain TinyLLaVA-Video-R1." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.266, + 0.826, + 0.323 + ], + "angle": 0, + "content": "For evaluation, we select four commonly used video understanding and reasoning benchmarks: MVBench [9], VideoMME[4], MLVU [31], and MMVU [29]. These benchmarks encompass videos from multiple disciplines and domains, with a wide range of durations, enabling a comprehensive assessment of the model's capabilities." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.348, + 0.434, + 0.362 + ], + "angle": 0, + "content": "4.2 Main Results and Aha Moment" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.377, + 0.825, + 0.462 + ], + "angle": 0, + "content": "As shown in Figure 4, during training, both the response length and rewards demonstrate stable growth. As presented in Table 1, compared to TinyLLaVA-Video-SFT, which is trained on the same dataset using supervised learning, TinyLLaVA-Video-R1 shows superior performance across multiple benchmarks. Additionally, compared to the base model TinyLLaVA-Video-ColdStart, TinyLLaVA-Video-R1 not only adheres to the required response format but also demonstrates improved reasoning capabilities." + }, + { + "type": "image", + "bbox": [ + 0.174, + 0.482, + 0.382, + 0.61 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.174, + 0.617, + 0.382, + 0.631 + ], + "angle": 0, + "content": "(a) Evolution in completion length." 
+ }, + { + "type": "image", + "bbox": [ + 0.396, + 0.482, + 0.603, + 0.61 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.4, + 0.617, + 0.598, + 0.631 + ], + "angle": 0, + "content": "(b) Evolution in accuracy reward." + }, + { + "type": "image", + "bbox": [ + 0.615, + 0.482, + 0.822, + 0.61 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.627, + 0.617, + 0.812, + 0.631 + ], + "angle": 0, + "content": "(c) Evolution in format reward." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.639, + 0.825, + 0.681 + ], + "angle": 0, + "content": "Figure 4: Evolution in key metrics during the training of TinyLLaVA-Video-R1. Under our reward rule settings, both the response length and rewards of TinyLLaVA-Video-R1 gradually increased during training." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.705, + 0.825, + 0.788 + ], + "angle": 0, + "content": "As shown in Figures 1 and 2, we further illustrate the model's reasoning ability. The model can comprehend and analyze video content, evaluate each option step-by-step, and ultimately provide an answer. Compared to models that only output answers without reasoning, TinyLLaVA-Video-R1 generates meaningful thought processes, making its responses more interpretable and valuable. This represents a significant advantage of video reasoning models over conventional video understanding models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.794, + 0.825, + 0.865 + ], + "angle": 0, + "content": "Similar to other works that use reinforcement learning to enhance model reasoning capabilities, we also reproduce the \"aha moment\" in TinyLLaVA-Video-R1, where the model exhibits emergent behaviors such as self-verification during its reasoning process. Our experimental results confirm that even when trained with weakly-reasoned general video data through reinforcement learning, the smaller model can still demonstrate retrospection and reflection." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.914 + ], + "angle": 0, + "content": "As highlighted in the blue annotations in Figures 1 and 3, the model revisits and verifies its initial reasoning after completing a round of thought. This behavior indicates that the model does not merely perform perception but also engages in continuous thinking and self-checking." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.32, + 0.106 + ], + "angle": 0, + "content": "4.3 Ablation Study" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.117, + 0.825, + 0.147 + ], + "angle": 0, + "content": "In this section, we present ablation studies on methods and key experimental findings that contribute significantly to the performance enhancement of TinyLLaVA-Video-R1." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.16, + 0.411, + 0.175 + ], + "angle": 0, + "content": "4.3.1 Impact of Cold-Start Data" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.184, + 0.825, + 0.268 + ], + "angle": 0, + "content": "Due to the limitations of language models, when we directly use TinyLLaVA-Video as the base model without length reward, we find that as training progresses, the model has a certain probability of learning to 'take shortcuts'. While adhering to the required format, all responses omit the reasoning process and are structured strictly as option . 
We observe similar experimental phenomena when conducting experiments on Qwen2-VL-2B [20], so we believe this is a common issue with small-scale models." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.826, + 0.344 + ], + "angle": 0, + "content": "However, when we perform a cold start with 16 human-annotated CoT data, this phenomenon no longer appear during the experiments. At the same time, the model also learn to comply with the format requirements more quickly. Therefore, we believe that cold starting is necessary for reasoning in small-scale models. Even a small amount of cold start data can be very helpful for stabilizing model training." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.357, + 0.517, + 0.372 + ], + "angle": 0, + "content": "4.3.2 Impact of Refinement of Format Rewards" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.381, + 0.518, + 0.631 + ], + "angle": 0, + "content": "In our initial experiments, similar to other works, we only apply format reward without incorporating continuous length reward. However, constrained by the capabilities of small-scale language models, training under this setup does not lead to an increase in response length, and even results in a slight decline. After introducing continuous length reward, the model's response length significantly increases during training, as shown in Figure 5. However, we observe that under this setup, the model engages in some meaningless reasoning to increase response length, which does not improve performance and even leads to a significant increase in training time. When incorporating answer correctness penalty into the total reward as described in Section 3.4, we observe both qualitative improvements in model responses and continued growth in output length and rewards throughout training as shown in Figure 4." + }, + { + "type": "image", + "bbox": [ + 0.533, + 0.385, + 0.822, + 0.564 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.527, + 0.573, + 0.825, + 0.603 + ], + "angle": 0, + "content": "Figure 5: The variation in response length during training under different settings." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.644, + 0.461, + 0.659 + ], + "angle": 0, + "content": "4.3.3 Other Experimental Explorations" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.668, + 0.827, + 0.821 + ], + "angle": 0, + "content": "Meanwhile, we also experiment with some existing improvements to GRPO. Some studies [24, 13] suggest that the distribution of reasoning models may differ significantly from the initial model, so removing the KL divergence can eliminate constraints on the model. As shown in Figure 6, our experiments similarly demonstrate that eliminating the KL divergence improves model performance. Additionally, Dr. GRPO [13] argues that the increase in response length may also stem from inherent biases in the GRPO objective function. After removing the KL divergence, we further exclude the response length term from the objective function and the reward variance term from the advantage calculation. As shown in Figure 6, the performance of the model improves again. At the same time, we observe a noticeable reduction in response length, the model tends to only provide descriptions of the video content while omitting analysis of the answer. We attribute this to the lack of strong reasoning in the training dataset, which fails to stimulate deep logical reasoning in the models." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.839, + 0.452, + 0.856 + ], + "angle": 0, + "content": "5 Conclusion and Future Work" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.87, + 0.825, + 0.914 + ], + "angle": 0, + "content": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, which is trained using reinforcement learning on a general Video-QA dataset. It not only significantly enhances reasoning and thinking capabilities, but also exhibits the emergent characteristic of \"aha" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.179, + 0.094, + 0.823, + 0.335 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.343, + 0.825, + 0.414 + ], + "angle": 0, + "content": "Figure 6: Ablation study on TinyLLaVA-R1 variants across multiple benchmarks. We compare the original TinyLLaVA-Video-R1 with two ablated versions: removing the KL divergence term (Del KL) and replacing the original GRPO with Dr. GRPO. Results are reported on MVBench, Video-MME (without subtitle input), MLVU, and MMVU (multiple-choice subset). Bold values indicate the best performance for each benchmark." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.433, + 0.825, + 0.488 + ], + "angle": 0, + "content": "moment\". Additionally, we present a series of experimental findings, hoping this work will provide valuable insights for future practitioners exploring the video reasoning abilities of small-scale models. We will further investigate small-scale video reasoning models, with potential future directions as follows:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.501, + 0.825, + 0.544 + ], + "angle": 0, + "content": "- Introducing high-quality video reasoning data. Currently, TinyLLaVA-Video-R1 is trained only on general video question-answering data. We aim to explore the upper limits of the model's reasoning capabilities by introducing higher-quality video reasoning data." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.549, + 0.825, + 0.606 + ], + "angle": 0, + "content": "- Improving reinforcement learning algorithms. Currently, TinyLLaVA-Video-R1 employs the GRPO algorithm for training. However, this approach exhibits notable limitations. To enhance its effectiveness in video reasoning tasks, we plan to refine the algorithm by addressing the key challenges observed in our experiment." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.501, + 0.825, + 0.606 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.621, + 0.825, + 0.673 + ], + "angle": 0, + "content": "Acknowledgment. This work was partially supported by the National Science and Technology Major Project (Grant No. 2022ZD0116310), National Natural Science Foundation of China (Grant No. 62476016), the Fundamental Research Funds for the Central Universities." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.694, + 0.268, + 0.709 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.717, + 0.826, + 0.757 + ], + "angle": 0, + "content": "[1] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02. 
1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.769, + 0.826, + 0.809 + ], + "angle": 0, + "content": "[2] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.821, + 0.826, + 0.859 + ], + "angle": 0, + "content": "[3] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.18, + 0.873, + 0.826, + 0.912 + ], + "angle": 0, + "content": "[4] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 6" + }, + { + "type": "list", + "bbox": [ + 0.18, + 0.717, + 0.826, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.092, + 0.826, + 0.131 + ], + "angle": 0, + "content": "[5] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.14, + 0.825, + 0.18 + ], + "angle": 0, + "content": "[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.187, + 0.825, + 0.227 + ], + "angle": 0, + "content": "[7] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.235, + 0.823, + 0.274 + ], + "angle": 0, + "content": "[8] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.282, + 0.826, + 0.322 + ], + "angle": 0, + "content": "[9] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mybench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.33, + 0.824, + 0.357 + ], + "angle": 0, + "content": "[10] Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In European Conference on Computer Vision, pages 323–340. Springer, 2025. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.365, + 0.824, + 0.392 + ], + "angle": 0, + "content": "[11] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.4, + 0.824, + 0.427 + ], + "angle": 0, + "content": "[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llavanext: Improved reasoning,OCR, and world knowledge, 2024.5" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.435, + 0.824, + 0.462 + ], + "angle": 0, + "content": "[13] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025. 2, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.47, + 0.824, + 0.497 + ], + "angle": 0, + "content": "[14] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.505, + 0.824, + 0.544 + ], + "angle": 0, + "content": "[15] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.552, + 0.824, + 0.592 + ], + "angle": 0, + "content": "[16] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b Imms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.6, + 0.824, + 0.64 + ], + "angle": 0, + "content": "[17] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.647, + 0.824, + 0.687 + ], + "angle": 0, + "content": "[18] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.695, + 0.824, + 0.735 + ], + "angle": 0, + "content": "[19] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.742, + 0.824, + 0.782 + ], + "angle": 0, + "content": "[20] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.79, + 0.885, + 0.816 + ], + "angle": 0, + "content": "[21] Xiaodong Wang and Peixi Peng. 
Open-r1-video. https://github.com/Wang-Xiaodong1899/Open-R1-Video, 2025.1" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.825, + 0.824, + 0.865 + ], + "angle": 0, + "content": "[22] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Intervideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.873, + 0.824, + 0.913 + ], + "angle": 0, + "content": "[23] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021. 4" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.092, + 0.885, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.092, + 0.826, + 0.133 + ], + "angle": 0, + "content": "[24] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. 2, 4, 7" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.14, + 0.826, + 0.18 + ], + "angle": 0, + "content": "[25] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.187, + 0.825, + 0.215 + ], + "angle": 0, + "content": "[26] Shaolei Zhang, Qingkai Fang, Zhe Yang, and Yang Feng. Llava-mini: Efficient image and video large multimodal models with one vision token. arXiv preprint arXiv:2501.03895, 2025. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.223, + 0.825, + 0.262 + ], + "angle": 0, + "content": "[27] Xingjian Zhang, Xi Weng, Yihao Yue, Zhaoxin Fan, Wenjun Wu, and Lei Huang. Tinyllava-video: A simple framework of small-scale large multimodal models for video understanding. arXiv preprint arXiv:2501.15513, 2025. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.27, + 0.825, + 0.298 + ], + "angle": 0, + "content": "[28] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.305, + 0.825, + 0.346 + ], + "angle": 0, + "content": "[29] Yilun Zhao, Lujing Xie, Haowei Zhang, Guo Gan, Yitao Long, Zhiyuan Hu, Tongyan Hu, Weiyuan Chen, Chuhan Li, Junyang Song, et al. Mmvu: Measuring expert-level multi-discipline video understanding. arXiv preprint arXiv:2501.12380, 2025. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.353, + 0.825, + 0.382 + ], + "angle": 0, + "content": "[30] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025. 1, 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.388, + 0.825, + 0.429 + ], + "angle": 0, + "content": "[31] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. 
Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 6" + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.092, + 0.826, + 0.429 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_origin.pdf b/data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..d7df3ddd9f14292fc467204309376660df57cba2 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/7ce904a9-a6a0-4d80-91b4-b12c38f5bda3_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69dfcc98a2793c52d40f925ac820892dc6e10956b6a6439c2ee006809c9675ab +size 3597849 diff --git a/data/2025/2504_09xxx/2504.09641/full.md b/data/2025/2504_09xxx/2504.09641/full.md new file mode 100644 index 0000000000000000000000000000000000000000..78e7f14527f00eba04d14a7169611aea097f8345 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/full.md @@ -0,0 +1,225 @@ +# TinyLLaVA-Video-R1: Towards Smaller LMMs for Video Reasoning + +Xingjian Zhang $^{1,*}$ Siwei Wen $^{1,2,*}$ Wenjun Wu $^{1,2,3}$ Lei Huang $^{1,2,3,\boxtimes}$ + +$^{1}$ SKLCCSE, Institute of Artificial Intelligence, Beihang University, Beijing, China + $^{2}$ Beijing Advanced Innovation Center for Future Blockchain and Privacy Computing, Beihang University + $^{3}$ Hangzhou International Innovation Institute, Beihang University, Hangzhou, China + +{huangleiai}@buaa.edu.cn + +# Abstract + +Recently, improving the reasoning ability of large multimodal models (LMMs) through reinforcement learning has made great progress. However, most existing works are based on highly reasoning-intensive datasets such as mathematics and code, and researchers generally choose large-scale models as the foundation. We argue that exploring small-scale models' reasoning capabilities remains valuable for researchers with limited computational resources. Moreover, enabling models to explain their reasoning processes on general question-answering datasets is equally meaningful. Therefore, we present the small-scale video reasoning model TinyLLaVA-Video-R1. Based on TinyLLaVA-Video [27], a traceably trained video understanding model with no more than 4B parameters, it not only demonstrates significantly improved reasoning and thinking capabilities after using reinforcement learning on general Video-QA datasets, but also exhibits the emergent characteristic of "aha moments". Furthermore, we share a series of experimental findings, aiming to provide practical insights for future exploration of video reasoning (thinking) abilities in small-scale models. It is available at https://github.com/ZhangXJ199/TinyLLaVA-Video-R1. + +# 1 Introduction + +Since DeepSeek-R1 [6] demonstrated that pure reinforcement learning can significantly enhance a model's reasoning capabilities, many subsequent works [1, 7, 15, 30, 16] have also explored improving the reasoning abilities of multimodal models, achieving notable progress. Most of these efforts focus on extending reasoning capabilities to the image modality [15, 1], conducting research using strong reasoning data such as math-image pairs and spatial reasoning [16, 7, 30], or task-specific data like grounding [14, 18]. 
However, existing research on video reasoning models has not made significant progress due to the scarcity of highly reasoning-intensive data. + +Open-R1-Video [21] is the first to introduce reasoning into the video domain, however, its performance on general Video-QA datasets is unsatisfactory, with benchmark results even showing a decline. Subsequently, Video-R1 [3] successfully integrates strong reasoning image-text pairs for video reasoning, achieving remarkable performance, but they make preliminary attempts and argue that small-scale models cannot produce effective reasoning processes. However, the high computational cost of large-scale models remains a significant barrier for many researchers with limited resources. Therefore, exploring the reasoning capabilities of smaller models is still necessary. + +![](images/a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg) +Figure 1: A case of TinyLLaVA-Video-R1 on video understanding data, sourced from MVBench. The model demonstrates the ability to perceive video scenes and analyze options, while also exhibiting reflective and backtracking behavior (highlighted in blue). + +![](images/b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg) + +Question: What is the action performed by the person in the video? A. drop. B. sit down. C. pick up. D.squat down. Output the thinking process in and final answer (option) in tags. + +# Owen2-VL-7B-Instruct: + + The person in the video is seen bending down towards the floor. Given the context of the room and the action being performed, it is reasonable to infer that the person is picking something up from the floor. C. pick up + +TinyLLaVA-Video-SFT: C. pick up. + +# TinyLLaVA-Video-R1: + + Let's start by analyzing the video. The video takes place in a room with white curtains, a green chair, a white table, and a white cabinet. The person is wearing a light green shirt, dark pants, and black shoes. The video begins with the person standing in the room, and then they start to move, bending down and picking up a colorful book or magazine from the floor. The video ends with the person standing up and holding the book or magazine. The question asks what the person is doing in the video. Let's look at the options: A. drop, B. sit down, C. pick up, D. squat down. Option A is incorrect because the person does not drop anything. Option B is incorrect because the person does not sit down. Option D is incorrect because the person does not squat down. Option C is correct because the person is picking up a colorful book or magazine from the floor. Let's confirm this by looking at the video again. The person is seen bending down and picking up a colorful book or magazine from the floor, and then standing up holding it. Therefore, the correct answer is C. C + +In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, based on the traceably trained model TinyLLaVA-Video [27]. After reinforcement learning on general VideoQA datasets, the model not only significantly improves its reasoning and thinking abilities, but also exhibits the emergent characteristic of "aha moments", which is more meaningful than simply generating answers through perception. Moreover, through extensive experiments under various configurations, we have obtained a series of insightful findings. We believe these discoveries will provide valuable guidance for future exploration of video reasoning capabilities in small-scale models. + +# 2 Related Work + +Large Language Reasoning Models. 
Recently, Kimi K1.5 [19] and DeepSeek-R1 [6] have gained widespread attention for their significant improvements in model performance on reasoning tasks through using reinforcement learning. Unlike approaches relying on process-supervised reward models [5], the Group Relative Policy Optimization (GRPO) algorithm [17] relying on rule-based rewards not only substantially reduces computational costs but has also sparked a new wave of interest due to intriguing phenomena such as "aha moments" observed during training. Under the influence of this research paradigm, numerous follow-up studies have advanced along this technical path, some have successfully replicated the method and extended it to multimodal domains [1, 7, 30], achieving notable progress, while others [13, 24] have focused on algorithmic optimization to further enhance the reasoning efficiency and performance of models. + +Multimodal Reasoning Models. With the remarkable advancement in reasoning capabilities of language models, an increasing number of studies have attempted to extend them to multimodal domains. LMM-R1 [16] proposes a two-stage training strategy to enhance the reasoning performance of multimodal models; Vision-R1 [7] attempts to address the post-cold-start overthinking issue in multimodal models; Video-R1 [3] develops T-GRPO to further enhance model's video comprehension. While these studies have made notable progress, most tend to adopt base models with 7B or more parameters to ensure superior reasoning effectiveness and robust performance. + +![](images/1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg) +Figure 2: A case of TinyLLaVA-Video-R1 on video reasoning data, sourced from MMVU. The model demonstrates comprehensive video content understanding and the capability to derive correct answers through analytical reasoning. + +![](images/fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg) + +Question: What is the treatment used in the video? A. Bone grafting. B. Hormone therapy. C. Chemotherapy. D. Photodynamic therapy. Output the thinking process in and final answer (option) in tags. + +# Owen2-VL-7B-Instruct: + + The video shows a medical procedure involving bone chips and fibrin glue, which are used to repair a damaged knee joint. This procedure is known as autologous bone grafting (ABG). Therefore, the treatment used in the video is bone grafting. A. Bone grafting. + +# TinyLLaVA-Video-SFT: A. + +# TinyLLaVA-Video-R1: + + To determine the treatment for osteochondral lesions, I will first review the types of osteochondral lesions to focus on the focus. The different types of osteochondral lesions include superficial and severe articular cartilage injuries with or without bone fragmentation. After defining the type of osteochondral lesion, the next step is to identify the treatment for it. I will first consider the available options, such as bone grafting (A), hormone therapy (B), chemotherapy (C), photodynamic therapy (D), and radiation therapy (E). Looking at the video, I can see the focus on a patient with a severe articular cartilage injury and bone fragmentation. Based on our knowledge of cartilage and bone diseases, I will select the treatment A, which is bone grafting. The video will show a comparison between normal and severely damaged cartilage, illustrating the treatment of severe articular cartilage injury with bone grafting using a surgical tool to fill the gap. Bone grafting is the correct treatment for this type of osteochondral lesion. 
Therefore, I will select option A. A + +Although some preliminary work has explored small-scale multimodal models [16, 1, 30], these investigations have primarily focused on the image modality, leaving research on video reasoning significantly underdeveloped. Due to the scarcity of high-quality video reasoning datasets and the inherent capability limitations of models, small-scale video reasoning models face multiple challenges including limited response length, difficulties in presenting coherent reasoning chains, and constrained overall performance improvement [3]. These critical challenges demand thorough investigation and substantive technical breakthroughs. + +# 3 Methods + +To explore the video reasoning capabilities of small-scale models, we conduct experiments on TinyLLaVA-Video [27]. We utilize the GRPO algorithm on the general Video-QA dataset NextQA and made specific modifications to the reward rules: adding a continuous length reward to the format reward and introducing penalties for incorrect answers. The experimental results in Section 4 demonstrate the effectiveness of these modifications. + +# 3.1 TinyLLaVA-Video + +TinyLLaVA-Video is a fully open-source small-scale video understanding model that employs Qwen2.5-3B [8] as its language model and SigLIP [25] as its visual encoder. It delivers competitive performance across multiple benchmarks. Crucially, its training data are fully open-sourced, and the entire training process remains traceable. This effectively prevents the repeated use of identical data across different training phases, thereby avoiding the introduction of uncontrolled variables and ensuring more reliable experimental results and conclusions. Such reproducibility and controllability represent a distinct advantage over models that only release weights, making TinyLLaVA-Video an ideal foundational model for our experiments on investigating video reasoning. + +# 3.2 Group Relative Policy Optimization (GRPO) + +We follow the GRPO algorithm [17] to train the model. For each question $\mathbf{q}$ , the policy model generates a set of candidate responses $\{O_1, O_2, \dots, O_G\}$ , computes the corresponding rewards + +![](images/5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg) +Figure 3: Cases of "aha moment", where the model demonstrates reflection and backtracking during its reasoning process (highlighted in blue). The cases are from MVBench and MMVU respectively. + +$\{r_1, r_2, \ldots, r_G\}$ based on the reward rules. And then these rewards are normalized to calculate the advantage for each response. Subsequently, the model is optimized through maximization of the following objective function: + +$$ +J _ {G R P O} (\theta) = \mathbb {E} _ {[ q, \{o _ {i} \} ]} \frac {1}{G} \sum_ {i = 1} ^ {G} \frac {1}{| o _ {i} |} \left\{\min \left[ \frac {\pi_ {\theta}}{\pi_ {\theta_ {o l d}}} A _ {i}, \operatorname {c l i p} \left(\frac {\pi_ {\theta}}{\pi_ {\theta_ {o l d}}}, 1 - \epsilon , 1 + \epsilon\right) A _ {i} \right] - \beta \mathbb {D} _ {K L} [ \pi_ {\theta} \| \pi_ {r e f} ] \right\} \tag {1} +$$ + +where $\pi_{\theta}$ and $\pi_{\theta_{old}}$ are the current and old policy, $\epsilon$ and $\beta$ are hyper-parameters, and $A_{i}$ is the advantages defined as: + +$$ +A _ {i} = \frac {r _ {i} - \operatorname {m e a n} \left(\left\{r _ {1} , r _ {2} , \cdots , r _ {G} \right\}\right)}{\operatorname {s t d} \left(\left\{r _ {1} , r _ {2} , \cdots , r _ {G} \right\}\right)}. 
\tag {2} +$$ + +In addition, our experimental observations reveal an issue analogous to DAPO [24]: when all responses in a set $\{O_i\}$ are correct and given equal rewards, their computed advantages vanish to zero. This phenomenon affects policy updates and diminishes sample efficiency. To maximize the utility of each sample, we introduce an additional gaussian noise $\mathcal{N}(0, 0.02^2)$ to the advantages. Although the noise induces only minor perturbations, it ensures intra-group advantage diversity across responses. + +# 3.3 Training Data and Template. + +We select multiple choice questions from the NextQA [23] subset of LLaVA-Video-178K [28] as training data. To maintain manageable training time with limited computational resources, we only choose the subset of data with a duration of 0 to 30 seconds, which contains 5,496 samples. It is a weak reasoning dataset, where the questions are more perception-oriented and exhibit weaker logical reasoning. However, we hypothesize that the model's reasoning abilities are likely predominantly derived from reinforcement learning, and we still aim to guide it to demonstrate its thought process by articulating the reasoning behind its choices, rather than merely providing an answer. + +During training, for each input question, in addition to the system template, we append the following prompt at the end: Output the thinking process in and final answer (option) in tags. Moreover, when computing rewards for responses, we strictly enforce the model to adhere to this format. + +
| Model | LLM size | Answer Type | MVBench | Video-MME (wo sub) | MLVU | MMVU (mc) |
| --- | --- | --- | --- | --- | --- | --- |
| LLaMA-VID [10] | 7B | Option | 41.4 | - | 33.2 | - |
| LLaVA-NeXT [12] | 7B | Option | - | - | 39.3 | 29.2 |
| VideoLLaVA [11] | 7B | Option | - | 39.9 | 47.3 | - |
| ShareGPT4Video [2] | 8B | Option | - | 39.9 | 46.4 | - |
| LLaVA-Mini [26] | 7B | Option | 44.5 | - | 42.8 | - |
| InternVideo2 [22] | 8B | Option | - | 41.9 | - | 39.0 |
| TinyLLaVA-Video-SFT | 3B | Option | 49.0 | 42.2 | 49.2 | 46.1 |
| TinyLLaVA-Video-ColdStart | 3B | Reason | 33.2 | 26.6 | 28.6 | 22.7 |
| TinyLLaVA-Video-R1 | 3B | Reason | 49.5 | 46.6 | 52.4 | 46.9 |
+ +Table 1: The performance of TinyLLaVA-Video-R1 on multiple benchmarks. "Option" indicates that the model only needs to answer with the selected choice, while "Reason" means the model must output both the answer and the reasoning process according to the format requirements. Here, MMVU is categorized as a video reasoning benchmark, the remaining benchmarks are designed for general-purpose video evaluation. The best results are indicated by boldface. + +# 3.4 Reward Rules. + +We also avoid using a reward model and define reward rules based on the format and accuracy of the responses as follows: + +Format reward. We require the thought process to be enclosed within $\langle \text{think} \rangle \langle \text{/think} \rangle$ , and the final answer to be enclosed within $\langle \text{answer} \rangle \langle \text{/answer} \rangle$ . These four tags can appear only once in the entire response, and if followed, the model will receive a format reward $FR = r_0 + LR$ . Here, $r_0$ represents the base reward for adhering to the required response format, and $LR$ is the continuous length reward designed to encourage the model to generate longer outputs, calculated as: + +$$ +L R = \min \left(1, \frac {L e n}{M L}\right) \times r _ {1}. \tag {3} +$$ + +Here, $Len$ represents the length of the response extracted from within the $<\text{think}>$ / $<\text{think}>$ tags, and $ML$ represents the maximum length corresponding to the upper limit of the reward. In our experiments, we set $r_0 = r_1 = 0.5$ , thus the format reward is limited to a maximum of 1. + +Accuracy reward. We design the accuracy reward $AR$ based on the answer. We extract the final answer from and compare it with the label. The model will receive an accuracy reward of $AR = r_2 > 0$ , if the answer is correct. Responses with either format errors preventing answer extraction or incorrect answers will result in zero accuracy reward, i.e. $AR = 0$ . To ensure that the accuracy reward and the format reward have equal importance, we set $r_2 = r_0 + r_1$ in our experiments. + +To encourage the model to increase the response length only when answering correctly, rather than arbitrarily increasing the length at the cost of accuracy, we deviate from most existing approaches that simply define the total reward as the sum of format reward and accuracy reward. Instead, we introduce a penalty for incorrect answers, with the total reward $R$ defined by the following formula: + +$$ +R = \left\{ \begin{array}{l l} A R + F R, & \text {i f} F R > 0 \text {a n d} A R = r _ {2} \\ - F R, & \text {i f} F R > 0 \text {a n d} A R = 0 \\ - \left(r _ {0} + r _ {1} + r _ {2}\right), & \text {i f} F R = 0 \end{array} \right. \tag {4} +$$ + +When the model's answer is correct, the longer the reasoning process, the higher the reward. In contrast, if the answer is incorrect, the longer the reasoning process, the higher the penalty incurred. + +# 4 Experiments + +# 4.1 Experimental Settings + +We conduct experiments on 8 NVIDIA A100-40G GPUs. During training, we keep the vision encoder frozen and update the connector and language model. We set the learning rate at 1e-6 for stable training. + +To facilitate rapid adaptation to reasoning format and ensure training stability, we first finetune the model using 16 human-annotated cold-start samples, resulting in TinyLLaVA-Video-ColdStart. We then adopt it as the base model for reinforcement learning and train on 5,496 NextQA data for one epoch to obtain TinyLLaVA-Video-R1. 
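To make the training signal described in Sections 3.2 and 3.4 concrete, the following is a minimal Python sketch of the reward rules (Equations (3) and (4)) and the noise-perturbed group advantages (Equation (2)). It is an illustration rather than the released implementation: the function names (`format_reward`, `total_reward`, `noisy_advantages`), the `MAX_LEN` placeholder standing in for $ML$, and the use of character counts for the length of the think segment are assumptions made here for readability.

```python
import re

import numpy as np

R0, R1 = 0.5, 0.5   # base format reward and length-reward cap (Sec. 3.4: r0 = r1 = 0.5)
R2 = R0 + R1        # accuracy reward for a correct answer (r2 = r0 + r1)
MAX_LEN = 1024      # "ML" in Eq. (3); placeholder value, not specified in the text


def format_reward(response: str) -> float:
    """Eq. (3): FR = r0 + min(1, Len/ML) * r1 when the tag format is respected, else 0."""
    think = re.findall(r"<think>(.*?)</think>", response, flags=re.S)
    answer = re.findall(r"<answer>(.*?)</answer>", response, flags=re.S)
    if len(think) != 1 or len(answer) != 1:  # each tag pair must appear exactly once
        return 0.0
    length_reward = min(1.0, len(think[0]) / MAX_LEN) * R1  # length measured in characters here
    return R0 + length_reward


def accuracy_reward(response: str, label: str) -> float:
    """AR = r2 if the extracted answer matches the label, else 0."""
    match = re.search(r"<answer>(.*?)</answer>", response, flags=re.S)
    if match is None:
        return 0.0
    return R2 if match.group(1).strip() == label.strip() else 0.0


def total_reward(response: str, label: str) -> float:
    """Eq. (4): reward correct, well-formatted answers; penalize incorrect or ill-formatted ones."""
    fr = format_reward(response)
    ar = accuracy_reward(response, label)
    if fr > 0 and ar == R2:
        return ar + fr
    if fr > 0:
        return -fr
    return -(R0 + R1 + R2)


def noisy_advantages(rewards, sigma=0.02):
    """Eq. (2) plus the Gaussian perturbation N(0, 0.02^2) from Sec. 3.2 (epsilon added for numerical safety)."""
    r = np.asarray(rewards, dtype=np.float64)
    advantages = (r - r.mean()) / (r.std() + 1e-8)
    return advantages + np.random.normal(0.0, sigma, size=r.shape)
```

In a GRPO step, a sketch like this would score each of the $G$ sampled responses to a question with `total_reward` and convert the resulting group of rewards into per-response advantages with `noisy_advantages`, which remain non-degenerate even when every response in the group receives the same reward.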
+ +For evaluation, we select four commonly used video understanding and reasoning benchmarks: MVBench [9], Video-MME [4], MLVU [31], and MMVU [29]. These benchmarks encompass videos from multiple disciplines and domains, with a wide range of durations, enabling a comprehensive assessment of the model's capabilities. + +# 4.2 Main Results and Aha Moment + +As shown in Figure 4, during training, both the response length and rewards demonstrate stable growth. As presented in Table 1, compared to TinyLLaVA-Video-SFT, which is trained on the same dataset using supervised learning, TinyLLaVA-Video-R1 shows superior performance across multiple benchmarks. Additionally, compared to the base model TinyLLaVA-Video-ColdStart, TinyLLaVA-Video-R1 not only adheres to the required response format but also demonstrates improved reasoning capabilities. + +![](images/2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg) +(a) Evolution in completion length. + +![](images/04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg) +(b) Evolution in accuracy reward. + +![](images/ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg) +(c) Evolution in format reward. + +Figure 4: Evolution in key metrics during the training of TinyLLaVA-Video-R1. Under our reward rule settings, both the response length and rewards of TinyLLaVA-Video-R1 gradually increased during training. + +As shown in Figures 1 and 2, we further illustrate the model's reasoning ability. The model can comprehend and analyze video content, evaluate each option step-by-step, and ultimately provide an answer. Compared to models that only output answers without reasoning, TinyLLaVA-Video-R1 generates meaningful thought processes, making its responses more interpretable and valuable. This represents a significant advantage of video reasoning models over conventional video understanding models. + +Similar to other works that use reinforcement learning to enhance model reasoning capabilities, we also reproduce the "aha moment" in TinyLLaVA-Video-R1, where the model exhibits emergent behaviors such as self-verification during its reasoning process. Our experimental results confirm that even when trained with weakly-reasoned general video data through reinforcement learning, the smaller model can still demonstrate retrospection and reflection. + +As highlighted in the blue annotations in Figures 1 and 3, the model revisits and verifies its initial reasoning after completing a round of thought. This behavior indicates that the model does not merely perform perception but also engages in continuous thinking and self-checking. + +# 4.3 Ablation Study + +In this section, we present ablation studies on methods and key experimental findings that contribute significantly to the performance enhancement of TinyLLaVA-Video-R1. + +# 4.3.1 Impact of Cold-Start Data + +Due to the limitations of language models, when we directly use TinyLLaVA-Video as the base model without the length reward, we find that as training progresses, the model has a certain probability of learning to 'take shortcuts'. While adhering to the required format, all responses omit the reasoning process and are structured strictly as `<answer> option </answer>`. We observe similar experimental phenomena when conducting experiments on Qwen2-VL-2B [20], so we believe this is a common issue with small-scale models. + +However, when we perform a cold start with 16 human-annotated CoT samples, this phenomenon no longer appears during the experiments.
At the same time, the model also learns to comply with the format requirements more quickly. Therefore, we believe that cold starting is necessary for reasoning in small-scale models. Even a small amount of cold-start data can be very helpful for stabilizing model training. + +# 4.3.2 Impact of Refining the Format Reward + +In our initial experiments, similar to other works, we only apply the format reward without incorporating the continuous length reward. However, constrained by the capabilities of small-scale language models, training under this setup does not lead to an increase in response length, and even results in a slight decline. After introducing the continuous length reward, the model's response length increases significantly during training, as shown in Figure 5. However, we observe that under this setup, the model engages in some meaningless reasoning to increase response length, which does not improve performance and even leads to a significant increase in training time. When incorporating the answer-correctness penalty into the total reward as described in Section 3.4, we observe both qualitative improvements in model responses and continued growth in output length and rewards throughout training, as shown in Figure 4. + +![](images/4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg) +Figure 5: The variation in response length during training under different settings. + +# 4.3.3 Other Experimental Explorations + +Meanwhile, we also experiment with some existing improvements to GRPO. Some studies [24, 13] suggest that the distribution of reasoning models may differ significantly from the initial model, so removing the KL divergence can eliminate constraints on the model. As shown in Figure 6, our experiments similarly demonstrate that eliminating the KL divergence improves model performance. Additionally, Dr. GRPO [13] argues that the increase in response length may also stem from inherent biases in the GRPO objective function. After removing the KL divergence, we further exclude the response-length term from the objective function and the reward-variance term from the advantage calculation. As shown in Figure 6, the performance of the model improves again. At the same time, we observe a noticeable reduction in response length: the model tends to provide only descriptions of the video content while omitting analysis of the answer. We attribute this to the lack of strong reasoning in the training dataset, which fails to stimulate deep logical reasoning in the models. + +# 5 Conclusion and Future Work + +In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, which is trained using reinforcement learning on a general Video-QA dataset. It not only significantly enhances reasoning and thinking capabilities, but also exhibits the emergent characteristic of "aha moment". Additionally, we present a series of experimental findings, hoping this work will provide valuable insights for future practitioners exploring the video reasoning abilities of small-scale models. + +![](images/cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg) +Figure 6: Ablation study on TinyLLaVA-R1 variants across multiple benchmarks. We compare the original TinyLLaVA-Video-R1 with two ablated versions: removing the KL divergence term (Del KL) and replacing the original GRPO with Dr. GRPO. Results are reported on MVBench, Video-MME (without subtitle input), MLVU, and MMVU (multiple-choice subset). Bold values indicate the best performance for each benchmark.
We will further investigate small-scale video reasoning models, with potential future directions as follows: + +- Introducing high-quality video reasoning data. Currently, TinyLLaVA-Video-R1 is trained only on general video question-answering data. We aim to explore the upper limits of the model's reasoning capabilities by introducing higher-quality video reasoning data. +- Improving reinforcement learning algorithms. Currently, TinyLLaVA-Video-R1 employs the GRPO algorithm for training. However, this approach exhibits notable limitations. To enhance its effectiveness in video reasoning tasks, we plan to refine the algorithm by addressing the key challenges observed in our experiments. + +Acknowledgment. This work was partially supported by the National Science and Technology Major Project (Grant No. 2022ZD0116310), the National Natural Science Foundation of China (Grant No. 62476016), and the Fundamental Research Funds for the Central Universities. + +# References + +[1] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02. 1, 2, 3 +[2] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024. 5 +[3] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. 1, 2, 3 +[4] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 6 + +[5] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. 2 +[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 2 +[7] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 1, 2 +[8] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2.5-coder technical report. arXiv preprint arXiv:2409.12186, 2024. 3 +[9] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mvbench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 6 +[10] Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In European Conference on Computer Vision, pages 323–340. Springer, 2025. 5 +[11] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023.
5 +[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llava-next: Improved reasoning, OCR, and world knowledge, 2024. 5 +[13] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025. 2, 7 +[14] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. 1 +[15] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. 1 +[16] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b lmms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 1, 2, 3 +[17] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 2, 3 +[18] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15. 1 +[19] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 2 +[20] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7 +[21] Xiaodong Wang and Peixi Peng. Open-r1-video. https://github.com/Wang-Xiaodong1899/Open-R1-Video, 2025. 1 +[22] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Internvideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024. 5 +[23] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021. 4 + +[24] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. 2, 4, 7 +[25] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 3 +[26] Shaolei Zhang, Qingkai Fang, Zhe Yang, and Yang Feng. Llava-mini: Efficient image and video large multimodal models with one vision token. arXiv preprint arXiv:2501.03895, 2025. 5 +[27] Xingjian Zhang, Xi Weng, Yihao Yue, Zhaoxin Fan, Wenjun Wu, and Lei Huang. Tinyllava-video: A simple framework of small-scale large multimodal models for video understanding.
arXiv preprint arXiv:2501.15513, 2025. 1, 2, 3 +[28] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024. 4 +[29] Yilun Zhao, Lujing Xie, Haowei Zhang, Guo Gan, Yitao Long, Zhiyuan Hu, Tongyan Hu, Weiyuan Chen, Chuhan Li, Junyang Song, et al. Mmvu: Measuring expert-level multi-discipline video understanding. arXiv preprint arXiv:2501.12380, 2025. 6 +[30] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's" aha moment" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025. 1, 2, 3 +[31] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 6 \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09641/images/04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg b/data/2025/2504_09xxx/2504.09641/images/04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8ebf011b587a23039283558da4d1eea44b5e6dc9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5d3f6bf9efc6143d083056da2685a02b2fd5e8d381204672efeee57cb01ce4e +size 14068 diff --git a/data/2025/2504_09xxx/2504.09641/images/05940639d82d38045b2319a9d1473fb595debc46d1d349a320af83d414a9b9cc.jpg b/data/2025/2504_09xxx/2504.09641/images/05940639d82d38045b2319a9d1473fb595debc46d1d349a320af83d414a9b9cc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d4d39f2bb565f26fc8106fb60fb639c8f042a7d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/05940639d82d38045b2319a9d1473fb595debc46d1d349a320af83d414a9b9cc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d3e7b370e1d9e2928192b643e1984f4e40774192d1862028816a3ed5b6d5681f +size 5366 diff --git a/data/2025/2504_09xxx/2504.09641/images/1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg b/data/2025/2504_09xxx/2504.09641/images/1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1fb88b90ef694334466f42e429f60aae2d7b7d9a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4b3e7e88eb17b27be223e880a676fecf721fa446afc4cf968f594473f6c109d2 +size 27298 diff --git a/data/2025/2504_09xxx/2504.09641/images/2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg b/data/2025/2504_09xxx/2504.09641/images/2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg new file mode 100644 index 0000000000000000000000000000000000000000..16eacae6fc90bdd607d3f02032859d2455f7dbfb --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38c27bcc0c610c3939ab2c72d24dd800a423d343d34ad80a72ac57ba9bc00e85 +size 12057 diff --git a/data/2025/2504_09xxx/2504.09641/images/35ec78c343f589aa72f0f2b4daca8a7ec35abc3ba14def0a6a833156be5653cd.jpg 
b/data/2025/2504_09xxx/2504.09641/images/35ec78c343f589aa72f0f2b4daca8a7ec35abc3ba14def0a6a833156be5653cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c58169b0d64421efcf31769cc95813fa30753daa --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/35ec78c343f589aa72f0f2b4daca8a7ec35abc3ba14def0a6a833156be5653cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:589b865dbfa9887710afbaffa90f7a2f3098f585e8b6142d67513c19a54dd1e1 +size 67896 diff --git a/data/2025/2504_09xxx/2504.09641/images/4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg b/data/2025/2504_09xxx/2504.09641/images/4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg new file mode 100644 index 0000000000000000000000000000000000000000..64992dc3ba3510cfea7a3a6fa268325ba26647de --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b117008456b935b48fd786c3476ba8a1e7d12134fab941393ea8fdf161dbc514 +size 27647 diff --git a/data/2025/2504_09xxx/2504.09641/images/5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg b/data/2025/2504_09xxx/2504.09641/images/5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c7db9338e26577aee31c7df1dae9c3811d1a530a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:190c69f1a2cea03db97f71bc3da60222f8a108b0aa72bb7164a0551004144274 +size 134425 diff --git a/data/2025/2504_09xxx/2504.09641/images/a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg b/data/2025/2504_09xxx/2504.09641/images/a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg new file mode 100644 index 0000000000000000000000000000000000000000..264eb9b7c57a9521705945898520da2b5acf41c4 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:678c5eb23ae866c55cf6ae1f510e2003d743d1f1b8d70aebb0f77693ab0f8aa2 +size 30480 diff --git a/data/2025/2504_09xxx/2504.09641/images/b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg b/data/2025/2504_09xxx/2504.09641/images/b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f2bfe225da036443e40b3c8095e04f0d4a012892 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21806f53589a64ad5b8fa868b1a52d32ee1b0d29454a8cada249b7c8ed5fc42b +size 1728 diff --git a/data/2025/2504_09xxx/2504.09641/images/b7f84bb0c6fdf03dc58d42b8a8e2990ec5f33aee8c4c7afb886a25430c954304.jpg b/data/2025/2504_09xxx/2504.09641/images/b7f84bb0c6fdf03dc58d42b8a8e2990ec5f33aee8c4c7afb886a25430c954304.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d1e668cb36abed4ed3c1e3d9f88ad1358ea70c61 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/b7f84bb0c6fdf03dc58d42b8a8e2990ec5f33aee8c4c7afb886a25430c954304.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:a13cc73240bd94a115358bebc61b374c0e5ebe986fd8445d814c58a5930139b8 +size 7344 diff --git a/data/2025/2504_09xxx/2504.09641/images/c028c6a75c1c71f4e565c9681cf2ede2f12cf9fc48793116e93b6718b118b7ec.jpg b/data/2025/2504_09xxx/2504.09641/images/c028c6a75c1c71f4e565c9681cf2ede2f12cf9fc48793116e93b6718b118b7ec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5955e74760c1a80a7454131e968fa00c8984aa7d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/c028c6a75c1c71f4e565c9681cf2ede2f12cf9fc48793116e93b6718b118b7ec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6faa166d5e49e1899b026bde6e6da3780d916b5d842be480e4dde82fde40751 +size 10408 diff --git a/data/2025/2504_09xxx/2504.09641/images/cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg b/data/2025/2504_09xxx/2504.09641/images/cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aafe39df9a8427d041a1e83b6e82dcaceb333c8e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2d14ba77460d9b6280f06ccda8de283e16fab9b2eda91e88cfaa99d90a0f7e6a +size 77806 diff --git a/data/2025/2504_09xxx/2504.09641/images/d230ef1b85351ef5e90a50e68411b841c08e2f6235d4f5db26fdbb49de9bca68.jpg b/data/2025/2504_09xxx/2504.09641/images/d230ef1b85351ef5e90a50e68411b841c08e2f6235d4f5db26fdbb49de9bca68.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9a8efc40bb60e4ba29aa0e042dbd7e1d4321bdae --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/d230ef1b85351ef5e90a50e68411b841c08e2f6235d4f5db26fdbb49de9bca68.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:18585901e6da76ff68138dfbfd35ba3bfe932d456189b56e8ef0ee66d49e557a +size 12334 diff --git a/data/2025/2504_09xxx/2504.09641/images/ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg b/data/2025/2504_09xxx/2504.09641/images/ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..985c3049f4522565faf04dc52394f1341bc8d4c4 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8de325d2e15875e700101cab4140bf73521c84c58f32f75ca1889c08db495b6e +size 12228 diff --git a/data/2025/2504_09xxx/2504.09641/images/fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg b/data/2025/2504_09xxx/2504.09641/images/fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3c9a054e123d3bb600fe21ba0b41be5958cc69fe --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/images/fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a57b6fb8f2a1a55791bf40023456cfcb38a4d74c996c6d4729a046497bdd1a6d +size 1660 diff --git a/data/2025/2504_09xxx/2504.09641/layout.json b/data/2025/2504_09xxx/2504.09641/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..9d887cbbcb7de1b69c6ca2060c70df13ab0f274a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09641/layout.json @@ -0,0 +1,5493 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 114, + 96, + 500, + 138 + 
], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 96, + 500, + 138 + ], + "spans": [ + { + "bbox": [ + 114, + 96, + 500, + 138 + ], + "type": "text", + "content": "TinyLLaVA-Video-R1: Towards Smaller LMMs for Video Reasoning" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "spans": [ + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "type": "text", + "content": "Xingjian Zhang" + }, + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "type": "inline_equation", + "content": "^{1,*}" + }, + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "type": "text", + "content": " Siwei Wen" + }, + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "type": "inline_equation", + "content": "^{1,2,*}" + }, + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "type": "text", + "content": " Wenjun Wu" + }, + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "type": "inline_equation", + "content": "^{1,2,3}" + }, + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "type": "text", + "content": " Lei Huang" + }, + { + "bbox": [ + 150, + 177, + 470, + 192 + ], + "type": "inline_equation", + "content": "^{1,2,3,\\boxtimes}" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 113, + 201, + 496, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 201, + 496, + 236 + ], + "spans": [ + { + "bbox": [ + 113, + 201, + 496, + 236 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 113, + 201, + 496, + 236 + ], + "type": "text", + "content": "SKLCCSE, Institute of Artificial Intelligence, Beihang University, Beijing, China \n" + }, + { + "bbox": [ + 113, + 201, + 496, + 236 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 113, + 201, + 496, + 236 + ], + "type": "text", + "content": "Beijing Advanced Innovation Center for Future Blockchain and Privacy Computing, Beihang University \n" + }, + { + "bbox": [ + 113, + 201, + 496, + 236 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 113, + 201, + 496, + 236 + ], + "type": "text", + "content": "Hangzhou International Innovation Institute, Beihang University, Hangzhou, China" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 241, + 244, + 370, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 241, + 244, + 370, + 258 + ], + "spans": [ + { + "bbox": [ + 241, + 244, + 370, + 258 + ], + "type": "text", + "content": "{huangleiai}@buaa.edu.cn" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 281, + 285, + 331, + 298 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 285, + 331, + 298 + ], + "spans": [ + { + "bbox": [ + 281, + 285, + 331, + 298 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 140, + 311, + 471, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 311, + 471, + 488 + ], + "spans": [ + { + "bbox": [ + 140, + 311, + 471, + 488 + ], + "type": "text", + "content": "Recently, improving the reasoning ability of large multimodal models (LMMs) through reinforcement learning has made great progress. However, most existing works are based on highly reasoning-intensive datasets such as mathematics and code, and researchers generally choose large-scale models as the foundation. 
We argue that exploring small-scale models' reasoning capabilities remains valuable for researchers with limited computational resources. Moreover, enabling models to explain their reasoning processes on general question-answering datasets is equally meaningful. Therefore, we present the small-scale video reasoning model TinyLLaVA-Video-R1. Based on TinyLLaVA-Video [27], a traceably trained video understanding model with no more than 4B parameters, it not only demonstrates significantly improved reasoning and thinking capabilities after using reinforcement learning on general Video-QA datasets, but also exhibits the emergent characteristic of \"aha moments\". Furthermore, we share a series of experimental findings, aiming to provide practical insights for future exploration of video reasoning (thinking) abilities in small-scale models. It is available at https://github.com/ZhangXJ199/TinyLLaVA-Video-R1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 511, + 193, + 525 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 511, + 193, + 525 + ], + "spans": [ + { + "bbox": [ + 105, + 511, + 193, + 525 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 536, + 506, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 616 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 616 + ], + "type": "text", + "content": "Since DeepSeek-R1 [6] demonstrated that pure reinforcement learning can significantly enhance a model's reasoning capabilities, many subsequent works [1, 7, 15, 30, 16] have also explored improving the reasoning abilities of multimodal models, achieving notable progress. Most of these efforts focus on extending reasoning capabilities to the image modality [15, 1], conducting research using strong reasoning data such as math-image pairs and spatial reasoning [16, 7, 30], or task-specific data like grounding [14, 18]. However, existing research on video reasoning models has not made significant progress due to the scarcity of highly reasoning-intensive data." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 619, + 507, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 619, + 507, + 698 + ], + "spans": [ + { + "bbox": [ + 104, + 619, + 507, + 698 + ], + "type": "text", + "content": "Open-R1-Video [21] is the first to introduce reasoning into the video domain, however, its performance on general Video-QA datasets is unsatisfactory, with benchmark results even showing a decline. Subsequently, Video-R1 [3] successfully integrates strong reasoning image-text pairs for video reasoning, achieving remarkable performance, but they make preliminary attempts and argue that small-scale models cannot produce effective reasoning processes. However, the high computational cost of large-scale models remains a significant barrier for many researchers with limited resources. Therefore, exploring the reasoning capabilities of smaller models is still necessary." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.09641v1 [cs.CV] 13 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 708, + 346, + 719 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 708, + 346, + 719 + ], + "spans": [ + { + "bbox": [ + 116, + 708, + 346, + 719 + ], + "type": "text", + "content": "\\*denotes equal contributor; " + }, + { + "bbox": [ + 116, + 708, + 346, + 719 + ], + "type": "inline_equation", + "content": "\\boxtimes" + }, + { + "bbox": [ + 116, + 708, + 346, + 719 + ], + "type": "text", + "content": " denotes corresponding author." + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 114, + 79, + 496, + 141 + ], + "blocks": [ + { + "bbox": [ + 114, + 79, + 496, + 141 + ], + "lines": [ + { + "bbox": [ + 114, + 79, + 496, + 141 + ], + "spans": [ + { + "bbox": [ + 114, + 79, + 496, + 141 + ], + "type": "image", + "image_path": "a48324f62da0204cc99f451db97ddb62f34764e48d17bac21c665934c33c9153.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "lines": [ + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 344, + 504, + 378 + ], + "type": "text", + "content": "Figure 1: A case of TinyLLaVA-Video-R1 on video understanding data, sourced from MVBench. The model demonstrates the ability to perceive video scenes and analyze options, while also exhibiting reflective and backtracking behavior (highlighted in blue)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 111, + 155, + 129, + 171 + ], + "blocks": [ + { + "bbox": [ + 111, + 155, + 129, + 171 + ], + "lines": [ + { + "bbox": [ + 111, + 155, + 129, + 171 + ], + "spans": [ + { + "bbox": [ + 111, + 155, + 129, + 171 + ], + "type": "image", + "image_path": "b2737581e44d006f4fed3337110ff85f27b8fb16a5ba0b70b75bade3bd5bc6cb.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 130, + 152, + 484, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 152, + 484, + 175 + ], + "spans": [ + { + "bbox": [ + 130, + 152, + 484, + 175 + ], + "type": "text", + "content": "Question: What is the action performed by the person in the video? A. drop. B. sit down. C. pick up. D.squat down. Output the thinking process in and final answer (option) in tags." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 184, + 193, + 191 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 184, + 193, + 191 + ], + "spans": [ + { + "bbox": [ + 116, + 184, + 193, + 191 + ], + "type": "text", + "content": "Owen2-VL-7B-Instruct:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 191, + 485, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 191, + 485, + 213 + ], + "spans": [ + { + "bbox": [ + 112, + 191, + 485, + 213 + ], + "type": "text", + "content": " The person in the video is seen bending down towards the floor. 
Given the context of the room and the action being performed, it is reasonable to infer that the person is picking something up from the floor. C. pick up " + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 222, + 235, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 222, + 235, + 232 + ], + "spans": [ + { + "bbox": [ + 117, + 222, + 235, + 232 + ], + "type": "text", + "content": "TinyLLaVA-Video-SFT: C. pick up." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 242, + 186, + 248 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 242, + 186, + 248 + ], + "spans": [ + { + "bbox": [ + 116, + 242, + 186, + 248 + ], + "type": "text", + "content": "TinyLLaVA-Video-R1:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 112, + 249, + 489, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 249, + 489, + 327 + ], + "spans": [ + { + "bbox": [ + 112, + 249, + 489, + 327 + ], + "type": "text", + "content": " Let's start by analyzing the video. The video takes place in a room with white curtains, a green chair, a white table, and a white cabinet. The person is wearing a light green shirt, dark pants, and black shoes. The video begins with the person standing in the room, and then they start to move, bending down and picking up a colorful book or magazine from the floor. The video ends with the person standing up and holding the book or magazine. The question asks what the person is doing in the video. Let's look at the options: A. drop, B. sit down, C. pick up, D. squat down. Option A is incorrect because the person does not drop anything. Option B is incorrect because the person does not sit down. Option D is incorrect because the person does not squat down. Option C is correct because the person is picking up a colorful book or magazine from the floor. Let's confirm this by looking at the video again. The person is seen bending down and picking up a colorful book or magazine from the floor, and then standing up holding it. Therefore, the correct answer is C. C " + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 396, + 504, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 396, + 504, + 473 + ], + "spans": [ + { + "bbox": [ + 104, + 396, + 504, + 473 + ], + "type": "text", + "content": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, based on the traceably trained model TinyLLaVA-Video [27]. After reinforcement learning on general VideoQA datasets, the model not only significantly improves its reasoning and thinking abilities, but also exhibits the emergent characteristic of \"aha moments\", which is more meaningful than simply generating answers through perception. Moreover, through extensive experiments under various configurations, we have obtained a series of insightful findings. We believe these discoveries will provide valuable guidance for future exploration of video reasoning capabilities in small-scale models." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 492, + 197, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 197, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 197, + 504 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 519, + 506, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 519, + 506, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 519, + 506, + 629 + ], + "type": "text", + "content": "Large Language Reasoning Models. Recently, Kimi K1.5 [19] and DeepSeek-R1 [6] have gained widespread attention for their significant improvements in model performance on reasoning tasks through using reinforcement learning. Unlike approaches relying on process-supervised reward models [5], the Group Relative Policy Optimization (GRPO) algorithm [17] relying on rule-based rewards not only substantially reduces computational costs but has also sparked a new wave of interest due to intriguing phenomena such as \"aha moments\" observed during training. Under the influence of this research paradigm, numerous follow-up studies have advanced along this technical path, some have successfully replicated the method and extended it to multimodal domains [1, 7, 30], achieving notable progress, while others [13, 24] have focused on algorithmic optimization to further enhance the reasoning efficiency and performance of models." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": "Multimodal Reasoning Models. With the remarkable advancement in reasoning capabilities of language models, an increasing number of studies have attempted to extend them to multimodal domains. LMM-R1 [16] proposes a two-stage training strategy to enhance the reasoning performance of multimodal models; Vision-R1 [7] attempts to address the post-cold-start overthinking issue in multimodal models; Video-R1 [3] develops T-GRPO to further enhance model's video comprehension. While these studies have made notable progress, most tend to adopt base models with 7B or more parameters to ensure superior reasoning effectiveness and robust performance." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 114, + 79, + 495, + 144 + ], + "blocks": [ + { + "bbox": [ + 114, + 79, + 495, + 144 + ], + "lines": [ + { + "bbox": [ + 114, + 79, + 495, + 144 + ], + "spans": [ + { + "bbox": [ + 114, + 79, + 495, + 144 + ], + "type": "image", + "image_path": "1453d1a2306639f06a704be05737468f804087f3356b59b34a5b76832f899987.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 345, + 504, + 378 + ], + "lines": [ + { + "bbox": [ + 104, + 345, + 504, + 378 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 504, + 378 + ], + "type": "text", + "content": "Figure 2: A case of TinyLLaVA-Video-R1 on video reasoning data, sourced from MMVU. 
The model demonstrates comprehensive video content understanding and the capability to derive correct answers through analytical reasoning." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 112, + 156, + 129, + 172 + ], + "blocks": [ + { + "bbox": [ + 112, + 156, + 129, + 172 + ], + "lines": [ + { + "bbox": [ + 112, + 156, + 129, + 172 + ], + "spans": [ + { + "bbox": [ + 112, + 156, + 129, + 172 + ], + "type": "image", + "image_path": "fa76093cb3c6878e23da96ffba855d49400a5e0f6c82027d9ef57ea8b3ddadcd.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 130, + 152, + 497, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 152, + 497, + 175 + ], + "spans": [ + { + "bbox": [ + 130, + 152, + 497, + 175 + ], + "type": "text", + "content": "Question: What is the treatment used in the video? A. Bone grafting. B. Hormone therapy. C. Chemotherapy. D. Photodynamic therapy. Output the thinking process in and final answer (option) in tags." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 116, + 185, + 193, + 192 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 185, + 193, + 192 + ], + "spans": [ + { + "bbox": [ + 116, + 185, + 193, + 192 + ], + "type": "text", + "content": "Owen2-VL-7B-Instruct:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 112, + 192, + 495, + 214 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 112, + 192, + 495, + 214 + ], + "spans": [ + { + "bbox": [ + 112, + 192, + 495, + 214 + ], + "type": "text", + "content": " The video shows a medical procedure involving bone chips and fibrin glue, which are used to repair a damaged knee joint. This procedure is known as autologous bone grafting (ABG). Therefore, the treatment used in the video is bone grafting. A. Bone grafting. " + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 117, + 223, + 203, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 223, + 203, + 232 + ], + "spans": [ + { + "bbox": [ + 117, + 223, + 203, + 232 + ], + "type": "text", + "content": "TinyLLaVA-Video-SFT: A." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 116, + 243, + 186, + 249 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 243, + 186, + 249 + ], + "spans": [ + { + "bbox": [ + 116, + 243, + 186, + 249 + ], + "type": "text", + "content": "TinyLLaVA-Video-R1:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 249, + 495, + 327 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 249, + 495, + 327 + ], + "spans": [ + { + "bbox": [ + 111, + 249, + 495, + 327 + ], + "type": "text", + "content": " To determine the treatment for osteochondral lesions, I will first review the types of osteochondral lesions to focus on the focus. The different types of osteochondral lesions include superficial and severe articular cartilage injuries with or without bone fragmentation. After defining the type of osteochondral lesion, the next step is to identify the treatment for it. I will first consider the available options, such as bone grafting (A), hormone therapy (B), chemotherapy (C), photodynamic therapy (D), and radiation therapy (E). Looking at the video, I can see the focus on a patient with a severe articular cartilage injury and bone fragmentation. 
Based on our knowledge of cartilage and bone diseases, I will select the treatment A, which is bone grafting. The video will show a comparison between normal and severely damaged cartilage, illustrating the treatment of severe articular cartilage injury with bone grafting using a surgical tool to fill the gap. Bone grafting is the correct treatment for this type of osteochondral lesion. Therefore, I will select option A. A " + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 388, + 504, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 388, + 504, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 388, + 504, + 465 + ], + "type": "text", + "content": "Although some preliminary work has explored small-scale multimodal models [16, 1, 30], these investigations have primarily focused on the image modality, leaving research on video reasoning significantly underdeveloped. Due to the scarcity of high-quality video reasoning datasets and the inherent capability limitations of models, small-scale video reasoning models face multiple challenges including limited response length, difficulties in presenting coherent reasoning chains, and constrained overall performance improvement [3]. These critical challenges demand thorough investigation and substantive technical breakthroughs." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 477, + 171, + 489 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 477, + 171, + 489 + ], + "spans": [ + { + "bbox": [ + 105, + 477, + 171, + 489 + ], + "type": "text", + "content": "3 Methods" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 498, + 504, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 498, + 504, + 552 + ], + "spans": [ + { + "bbox": [ + 104, + 498, + 504, + 552 + ], + "type": "text", + "content": "To explore the video reasoning capabilities of small-scale models, we conduct experiments on TinyLLaVA-Video [27]. We utilize the GRPO algorithm on the general Video-QA dataset NextQA and made specific modifications to the reward rules: adding a continuous length reward to the format reward and introducing penalties for incorrect answers. The experimental results in Section 4 demonstrate the effectiveness of these modifications." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 562, + 209, + 574 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 562, + 209, + 574 + ], + "spans": [ + { + "bbox": [ + 105, + 562, + 209, + 574 + ], + "type": "text", + "content": "3.1 TinyLLaVA-Video" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 582, + 504, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 582, + 504, + 671 + ], + "spans": [ + { + "bbox": [ + 104, + 582, + 504, + 671 + ], + "type": "text", + "content": "TinyLLaVA-Video is a fully open-source small-scale video understanding model that employs Qwen2.5-3B [8] as its language model and SigLIP [25] as its visual encoder. It delivers competitive performance across multiple benchmarks. Crucially, its training data are fully open-sourced, and the entire training process remains traceable. This effectively prevents the repeated use of identical data across different training phases, thereby avoiding the introduction of uncontrolled variables and ensuring more reliable experimental results and conclusions. 
Such reproducibility and controllability represent a distinct advantage over models that only release weights, making TinyLLaVA-Video an ideal foundational model for our experiments on investigating video reasoning." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 679, + 321, + 691 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 679, + 321, + 691 + ], + "spans": [ + { + "bbox": [ + 105, + 679, + 321, + 691 + ], + "type": "text", + "content": "3.2 Group Relative Policy Optimization (GRPO)" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "We follow the GRPO algorithm [17] to train the model. For each question " + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "\\mathbf{q}" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": ", the policy model generates a set of candidate responses " + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "inline_equation", + "content": "\\{O_1, O_2, \\dots, O_G\\}" + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": ", computes the corresponding rewards" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 71, + 504, + 311 + ], + "blocks": [ + { + "bbox": [ + 108, + 71, + 504, + 311 + ], + "lines": [ + { + "bbox": [ + 108, + 71, + 504, + 311 + ], + "spans": [ + { + "bbox": [ + 108, + 71, + 504, + 311 + ], + "type": "image", + "image_path": "5c540f5f921a30b87ba9783ba04294ba1585fcacdcd6fb3a62bcfe3c4189c960.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 317, + 504, + 340 + ], + "lines": [ + { + "bbox": [ + 104, + 317, + 504, + 340 + ], + "spans": [ + { + "bbox": [ + 104, + 317, + 504, + 340 + ], + "type": "text", + "content": "Figure 3: Cases of \"aha moment\", where the model demonstrates reflection and backtracking during its reasoning process (highlighted in blue). The cases are from MVBench and MMVU respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 356, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 356, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 356, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\{r_1, r_2, \\ldots, r_G\\}" + }, + { + "bbox": [ + 104, + 356, + 504, + 392 + ], + "type": "text", + "content": " based on the reward rules. And then these rewards are normalized to calculate the advantage for each response. 
Subsequently, the model is optimized through maximization of the following objective function:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 121, + 399, + 504, + 422 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 399, + 504, + 422 + ], + "spans": [ + { + "bbox": [ + 121, + 399, + 504, + 422 + ], + "type": "interline_equation", + "content": "J _ {G R P O} (\\theta) = \\mathbb {E} _ {[ q, \\{o _ {i} \\} ]} \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\left\\{\\min \\left[ \\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}} A _ {i}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta}}{\\pi_ {\\theta_ {o l d}}}, 1 - \\epsilon , 1 + \\epsilon\\right) A _ {i} \\right] - \\beta \\mathbb {D} _ {K L} [ \\pi_ {\\theta} \\| \\pi_ {r e f} ] \\right\\} \\tag {1}", + "image_path": "c028c6a75c1c71f4e565c9681cf2ede2f12cf9fc48793116e93b6718b118b7ec.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "spans": [ + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta_{old}}" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": " are the current and old policy, " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": " are hyper-parameters, and " + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "inline_equation", + "content": "A_{i}" + }, + { + "bbox": [ + 104, + 429, + 504, + 452 + ], + "type": "text", + "content": " is the advantages defined as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 230, + 455, + 504, + 482 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 230, + 455, + 504, + 482 + ], + "spans": [ + { + "bbox": [ + 230, + 455, + 504, + 482 + ], + "type": "interline_equation", + "content": "A _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(\\left\\{r _ {1} , r _ {2} , \\cdots , r _ {G} \\right\\}\\right)}. \\tag {2}", + "image_path": "b7f84bb0c6fdf03dc58d42b8a8e2990ec5f33aee8c4c7afb886a25430c954304.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "type": "text", + "content": "In addition, our experimental observations reveal an issue analogous to DAPO [24]: when all responses in a set " + }, + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "type": "inline_equation", + "content": "\\{O_i\\}" + }, + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "type": "text", + "content": " are correct and given equal rewards, their computed advantages vanish to zero. 
This phenomenon affects policy updates and diminishes sample efficiency. To maximize the utility of each sample, we introduce an additional gaussian noise " + }, + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "type": "inline_equation", + "content": "\\mathcal{N}(0, 0.02^2)" + }, + { + "bbox": [ + 104, + 492, + 504, + 559 + ], + "type": "text", + "content": " to the advantages. Although the noise induces only minor perturbations, it ensures intra-group advantage diversity across responses." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 574, + 254, + 586 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 574, + 254, + 586 + ], + "spans": [ + { + "bbox": [ + 105, + 574, + 254, + 586 + ], + "type": "text", + "content": "3.3 Training Data and Template." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 596, + 504, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 504, + 674 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 504, + 674 + ], + "type": "text", + "content": "We select multiple choice questions from the NextQA [23] subset of LLaVA-Video-178K [28] as training data. To maintain manageable training time with limited computational resources, we only choose the subset of data with a duration of 0 to 30 seconds, which contains 5,496 samples. It is a weak reasoning dataset, where the questions are more perception-oriented and exhibit weaker logical reasoning. However, we hypothesize that the model's reasoning abilities are likely predominantly derived from reinforcement learning, and we still aim to guide it to demonstrate its thought process by articulating the reasoning behind its choices, rather than merely providing an answer." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 504, + 723 + ], + "type": "text", + "content": "During training, for each input question, in addition to the system template, we append the following prompt at the end: Output the thinking process in and final answer (option) in tags. Moreover, when computing rewards for responses, we strictly enforce the model to adhere to this format." + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 106, + 70, + 504, + 223 + ], + "blocks": [ + { + "bbox": [ + 106, + 70, + 504, + 223 + ], + "lines": [ + { + "bbox": [ + 106, + 70, + 504, + 223 + ], + "spans": [ + { + "bbox": [ + 106, + 70, + 504, + 223 + ], + "type": "table", + "html": "
ModelLLM sizeAnswer TypeMVBenchVideo-MME (wo sub)MLVUMMVU (mc)
LLaMA-VID [10]7BOption41.4-33.2-
LLaVA-NeXT [12]7BOption--39.329.2
VideoLLaVA [11]7BOption-39.947.3-
ShareGPT4Video [2]8BOption-39.946.4-
LLaVA-Mini [26]7BOption44.5-42.8-
InternVideo2 [22]8BOption-41.9-39.0
TinyLLaVA-Video-SFT3BOption49.042.249.246.1
TinyLLaVA-Video-ColdStart3BReason33.226.628.622.7
TinyLLaVA-Video-R13BReason49.546.652.446.9
", + "image_path": "35ec78c343f589aa72f0f2b4daca8a7ec35abc3ba14def0a6a833156be5653cd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 230, + 504, + 286 + ], + "lines": [ + { + "bbox": [ + 104, + 230, + 504, + 286 + ], + "spans": [ + { + "bbox": [ + 104, + 230, + 504, + 286 + ], + "type": "text", + "content": "Table 1: The performance of TinyLLaVA-Video-R1 on multiple benchmarks. \"Option\" indicates that the model only needs to answer with the selected choice, while \"Reason\" means the model must output both the answer and the reasoning process according to the format requirements. Here, MMVU is categorized as a video reasoning benchmark, the remaining benchmarks are designed for general-purpose video evaluation. The best results are indicated by boldface." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 105, + 303, + 193, + 314 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 303, + 193, + 314 + ], + "spans": [ + { + "bbox": [ + 105, + 303, + 193, + 314 + ], + "type": "text", + "content": "3.4 Reward Rules." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "spans": [ + { + "bbox": [ + 104, + 325, + 504, + 348 + ], + "type": "text", + "content": "We also avoid using a reward model and define reward rules based on the format and accuracy of the responses as follows:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "spans": [ + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "text", + "content": "Format reward. We require the thought process to be enclosed within " + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "inline_equation", + "content": "\\langle \\text{think} \\rangle \\langle \\text{/think} \\rangle" + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "text", + "content": ", and the final answer to be enclosed within " + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "inline_equation", + "content": "\\langle \\text{answer} \\rangle \\langle \\text{/answer} \\rangle" + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "text", + "content": ". These four tags can appear only once in the entire response, and if followed, the model will receive a format reward " + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "inline_equation", + "content": "FR = r_0 + LR" + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "text", + "content": ". 
Here, " + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "inline_equation", + "content": "r_0" + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "text", + "content": " represents the base reward for adhering to the required response format, and " + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "inline_equation", + "content": "LR" + }, + { + "bbox": [ + 104, + 363, + 504, + 419 + ], + "type": "text", + "content": " is the continuous length reward designed to encourage the model to generate longer outputs, calculated as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 246, + 428, + 504, + 455 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 428, + 504, + 455 + ], + "spans": [ + { + "bbox": [ + 246, + 428, + 504, + 455 + ], + "type": "interline_equation", + "content": "L R = \\min \\left(1, \\frac {L e n}{M L}\\right) \\times r _ {1}. \\tag {3}", + "image_path": "05940639d82d38045b2319a9d1473fb595debc46d1d349a320af83d414a9b9cc.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "spans": [ + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "inline_equation", + "content": "Len" + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "text", + "content": " represents the length of the response extracted from within the " + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "inline_equation", + "content": "<\\text{think}>" + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "text", + "content": "/" + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "inline_equation", + "content": "<\\text{think}>" + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "text", + "content": " tags, and " + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "inline_equation", + "content": "ML" + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "text", + "content": " represents the maximum length corresponding to the upper limit of the reward. In our experiments, we set " + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "inline_equation", + "content": "r_0 = r_1 = 0.5" + }, + { + "bbox": [ + 104, + 464, + 504, + 498 + ], + "type": "text", + "content": ", thus the format reward is limited to a maximum of 1." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "spans": [ + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "text", + "content": "Accuracy reward. We design the accuracy reward " + }, + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "inline_equation", + "content": "AR" + }, + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "text", + "content": " based on the answer. We extract the final answer from and compare it with the label. The model will receive an accuracy reward of " + }, + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "inline_equation", + "content": "AR = r_2 > 0" + }, + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "text", + "content": ", if the answer is correct. Responses with either format errors preventing answer extraction or incorrect answers will result in zero accuracy reward, i.e. 
" + }, + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "inline_equation", + "content": "AR = 0" + }, + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "text", + "content": ". To ensure that the accuracy reward and the format reward have equal importance, we set " + }, + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "inline_equation", + "content": "r_2 = r_0 + r_1" + }, + { + "bbox": [ + 104, + 513, + 504, + 580 + ], + "type": "text", + "content": " in our experiments." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 594, + 504, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 594, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 594, + 504, + 640 + ], + "type": "text", + "content": "To encourage the model to increase the response length only when answering correctly, rather than arbitrarily increasing the length at the cost of accuracy, we deviate from most existing approaches that simply define the total reward as the sum of format reward and accuracy reward. Instead, we introduce a penalty for incorrect answers, with the total reward " + }, + { + "bbox": [ + 104, + 594, + 504, + 640 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 104, + 594, + 504, + 640 + ], + "type": "text", + "content": " defined by the following formula:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 198, + 650, + 504, + 690 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 198, + 650, + 504, + 690 + ], + "spans": [ + { + "bbox": [ + 198, + 650, + 504, + 690 + ], + "type": "interline_equation", + "content": "R = \\left\\{ \\begin{array}{l l} A R + F R, & \\text {i f} F R > 0 \\text {a n d} A R = r _ {2} \\\\ - F R, & \\text {i f} F R > 0 \\text {a n d} A R = 0 \\\\ - \\left(r _ {0} + r _ {1} + r _ {2}\\right), & \\text {i f} F R = 0 \\end{array} \\right. \\tag {4}", + "image_path": "d230ef1b85351ef5e90a50e68411b841c08e2f6235d4f5db26fdbb49de9bca68.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "When the model's answer is correct, the longer the reasoning process, the higher the reward. In contrast, if the answer is incorrect, the longer the reasoning process, the higher the penalty incurred." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 192, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 192, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 192, + 85 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 100, + 226, + 113 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 100, + 226, + 113 + ], + "spans": [ + { + "bbox": [ + 105, + 100, + 226, + 113 + ], + "type": "text", + "content": "4.1 Experimental Settings" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 122, + 504, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 122, + 504, + 157 + ], + "spans": [ + { + "bbox": [ + 104, + 122, + 504, + 157 + ], + "type": "text", + "content": "We conduct experiments on 8 NVIDIA A100-40G GPUs. During training, we keep the vision encoder frozen and update the connector and language model. We set the learning rate at 1e-6 for stable training." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 161, + 504, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 161, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 104, + 161, + 504, + 205 + ], + "type": "text", + "content": "To facilitate rapid adaptation to reasoning format and ensure training stability, we first finetune the model using 16 human-annotated cold-start samples, resulting in TinyLLaVA-Video-ColdStart. We then adopt it as the base model for reinforcement learning and train on 5,496 NextQA data for one epoch to obtain TinyLLaVA-Video-R1." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 210, + 505, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 210, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 210, + 505, + 255 + ], + "type": "text", + "content": "For evaluation, we select four commonly used video understanding and reasoning benchmarks: MVBench [9], VideoMME[4], MLVU [31], and MMVU [29]. These benchmarks encompass videos from multiple disciplines and domains, with a wide range of durations, enabling a comprehensive assessment of the model's capabilities." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 275, + 265, + 286 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 275, + 265, + 286 + ], + "spans": [ + { + "bbox": [ + 105, + 275, + 265, + 286 + ], + "type": "text", + "content": "4.2 Main Results and Aha Moment" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 298, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 298, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 298, + 504, + 365 + ], + "type": "text", + "content": "As shown in Figure 4, during training, both the response length and rewards demonstrate stable growth. As presented in Table 1, compared to TinyLLaVA-Video-SFT, which is trained on the same dataset using supervised learning, TinyLLaVA-Video-R1 shows superior performance across multiple benchmarks. 
Additionally, compared to the base model TinyLLaVA-Video-ColdStart, TinyLLaVA-Video-R1 not only adheres to the required response format but also demonstrates improved reasoning capabilities." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 106, + 381, + 233, + 483 + ], + "blocks": [ + { + "bbox": [ + 106, + 381, + 233, + 483 + ], + "lines": [ + { + "bbox": [ + 106, + 381, + 233, + 483 + ], + "spans": [ + { + "bbox": [ + 106, + 381, + 233, + 483 + ], + "type": "image", + "image_path": "2c4b9c8c8c39bbfa55734606c1c3da8f539e9ab366970e2b70ad1cdced2df662.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 106, + 488, + 233, + 499 + ], + "lines": [ + { + "bbox": [ + 106, + 488, + 233, + 499 + ], + "spans": [ + { + "bbox": [ + 106, + 488, + 233, + 499 + ], + "type": "text", + "content": "(a) Evolution in completion length." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 242, + 381, + 369, + 483 + ], + "blocks": [ + { + "bbox": [ + 242, + 381, + 369, + 483 + ], + "lines": [ + { + "bbox": [ + 242, + 381, + 369, + 483 + ], + "spans": [ + { + "bbox": [ + 242, + 381, + 369, + 483 + ], + "type": "image", + "image_path": "04dcf3bd0f8ace197ae4eee787c12e0f57a8cf18ada8ec8ceb38f4bb8e32b5b2.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 506, + 504, + 539 + ], + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 539 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 539 + ], + "type": "text", + "content": "Figure 4: Evolution in key metrics during the training of TinyLLaVA-Video-R1. Under our reward rule settings, both the response length and rewards of TinyLLaVA-Video-R1 gradually increased during training." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 376, + 381, + 503, + 483 + ], + "blocks": [ + { + "bbox": [ + 244, + 488, + 365, + 499 + ], + "lines": [ + { + "bbox": [ + 244, + 488, + 365, + 499 + ], + "spans": [ + { + "bbox": [ + 244, + 488, + 365, + 499 + ], + "type": "text", + "content": "(b) Evolution in accuracy reward." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 376, + 381, + 503, + 483 + ], + "lines": [ + { + "bbox": [ + 376, + 381, + 503, + 483 + ], + "spans": [ + { + "bbox": [ + 376, + 381, + 503, + 483 + ], + "type": "image", + "image_path": "ebe8f72aa33ef4305b3c502467b2a8d80d596a0ae85d0da9a16d8c211d6097af.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 383, + 488, + 496, + 499 + ], + "lines": [ + { + "bbox": [ + 383, + 488, + 496, + 499 + ], + "spans": [ + { + "bbox": [ + 383, + 488, + 496, + 499 + ], + "type": "text", + "content": "(c) Evolution in format reward." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 558, + 504, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 558, + 504, + 624 + ], + "spans": [ + { + "bbox": [ + 104, + 558, + 504, + 624 + ], + "type": "text", + "content": "As shown in Figures 1 and 2, we further illustrate the model's reasoning ability. The model can comprehend and analyze video content, evaluate each option step-by-step, and ultimately provide an answer. 
Compared to models that only output answers without reasoning, TinyLLaVA-Video-R1 generates meaningful thought processes, making its responses more interpretable and valuable. This represents a significant advantage of video reasoning models over conventional video understanding models." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 628, + 504, + 685 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 628, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 104, + 628, + 504, + 685 + ], + "type": "text", + "content": "Similar to other works that use reinforcement learning to enhance model reasoning capabilities, we also reproduce the \"aha moment\" in TinyLLaVA-Video-R1, where the model exhibits emergent behaviors such as self-verification during its reasoning process. Our experimental results confirm that even when trained with weakly-reasoned general video data through reinforcement learning, the smaller model can still demonstrate retrospection and reflection." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "As highlighted in the blue annotations in Figures 1 and 3, the model revisits and verifies its initial reasoning after completing a round of thought. This behavior indicates that the model does not merely perform perception but also engages in continuous thinking and self-checking." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 195, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 195, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 195, + 83 + ], + "type": "text", + "content": "4.3 Ablation Study" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 92, + 504, + 116 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 92, + 504, + 116 + ], + "spans": [ + { + "bbox": [ + 104, + 92, + 504, + 116 + ], + "type": "text", + "content": "In this section, we present ablation studies on methods and key experimental findings that contribute significantly to the performance enhancement of TinyLLaVA-Video-R1." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 126, + 251, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 126, + 251, + 138 + ], + "spans": [ + { + "bbox": [ + 104, + 126, + 251, + 138 + ], + "type": "text", + "content": "4.3.1 Impact of Cold-Start Data" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 145, + 504, + 212 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 145, + 504, + 212 + ], + "spans": [ + { + "bbox": [ + 104, + 145, + 504, + 212 + ], + "type": "text", + "content": "Due to the limitations of language models, when we directly use TinyLLaVA-Video as the base model without length reward, we find that as training progresses, the model has a certain probability of learning to 'take shortcuts'. 
While adhering to the required format, all responses omit the reasoning process and are structured strictly as option . We observe similar experimental phenomena when conducting experiments on Qwen2-VL-2B [20], so we believe this is a common issue with small-scale models." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 216, + 505, + 272 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 505, + 272 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 505, + 272 + ], + "type": "text", + "content": "However, when we perform a cold start with 16 human-annotated CoT data, this phenomenon no longer appear during the experiments. At the same time, the model also learn to comply with the format requirements more quickly. Therefore, we believe that cold starting is necessary for reasoning in small-scale models. Even a small amount of cold start data can be very helpful for stabilizing model training." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 282, + 316, + 294 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 282, + 316, + 294 + ], + "spans": [ + { + "bbox": [ + 104, + 282, + 316, + 294 + ], + "type": "text", + "content": "4.3.2 Impact of Refinement of Format Rewards" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 301, + 317, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 301, + 317, + 499 + ], + "spans": [ + { + "bbox": [ + 104, + 301, + 317, + 499 + ], + "type": "text", + "content": "In our initial experiments, similar to other works, we only apply format reward without incorporating continuous length reward. However, constrained by the capabilities of small-scale language models, training under this setup does not lead to an increase in response length, and even results in a slight decline. After introducing continuous length reward, the model's response length significantly increases during training, as shown in Figure 5. However, we observe that under this setup, the model engages in some meaningless reasoning to increase response length, which does not improve performance and even leads to a significant increase in training time. When incorporating answer correctness penalty into the total reward as described in Section 3.4, we observe both qualitative improvements in model responses and continued growth in output length and rewards throughout training as shown in Figure 4." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 326, + 304, + 503, + 446 + ], + "blocks": [ + { + "bbox": [ + 326, + 304, + 503, + 446 + ], + "lines": [ + { + "bbox": [ + 326, + 304, + 503, + 446 + ], + "spans": [ + { + "bbox": [ + 326, + 304, + 503, + 446 + ], + "type": "image", + "image_path": "4f4bcd5696d26eb12b55606a229bf0bb0d5813359d950d5f69eb13a247219735.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 322, + 453, + 504, + 477 + ], + "lines": [ + { + "bbox": [ + 322, + 453, + 504, + 477 + ], + "spans": [ + { + "bbox": [ + 322, + 453, + 504, + 477 + ], + "type": "text", + "content": "Figure 5: The variation in response length during training under different settings." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 510, + 282, + 521 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 510, + 282, + 521 + ], + "spans": [ + { + "bbox": [ + 104, + 510, + 282, + 521 + ], + "type": "text", + "content": "4.3.3 Other Experimental Explorations" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 529, + 506, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 529, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 104, + 529, + 506, + 650 + ], + "type": "text", + "content": "Meanwhile, we also experiment with some existing improvements to GRPO. Some studies [24, 13] suggest that the distribution of reasoning models may differ significantly from the initial model, so removing the KL divergence can eliminate constraints on the model. As shown in Figure 6, our experiments similarly demonstrate that eliminating the KL divergence improves model performance. Additionally, Dr. GRPO [13] argues that the increase in response length may also stem from inherent biases in the GRPO objective function. After removing the KL divergence, we further exclude the response length term from the objective function and the reward variance term from the advantage calculation. As shown in Figure 6, the performance of the model improves again. At the same time, we observe a noticeable reduction in response length, the model tends to only provide descriptions of the video content while omitting analysis of the answer. We attribute this to the lack of strong reasoning in the training dataset, which fails to stimulate deep logical reasoning in the models." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 664, + 276, + 677 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 664, + 276, + 677 + ], + "spans": [ + { + "bbox": [ + 104, + 664, + 276, + 677 + ], + "type": "text", + "content": "5 Conclusion and Future Work" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "In this work, we propose the small-scale video reasoning model TinyLLaVA-Video-R1, which is trained using reinforcement learning on a general Video-QA dataset. 
It not only significantly enhances reasoning and thinking capabilities, but also exhibits the emergent characteristic of \"aha" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 109, + 74, + 503, + 265 + ], + "blocks": [ + { + "bbox": [ + 109, + 74, + 503, + 265 + ], + "lines": [ + { + "bbox": [ + 109, + 74, + 503, + 265 + ], + "spans": [ + { + "bbox": [ + 109, + 74, + 503, + 265 + ], + "type": "image", + "image_path": "cbbcdfd2362f759fc89c64054ba6520e5fd103ac36b6b5a417205f310112c08e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 271, + 504, + 327 + ], + "lines": [ + { + "bbox": [ + 104, + 271, + 504, + 327 + ], + "spans": [ + { + "bbox": [ + 104, + 271, + 504, + 327 + ], + "type": "text", + "content": "Figure 6: Ablation study on TinyLLaVA-R1 variants across multiple benchmarks. We compare the original TinyLLaVA-Video-R1 with two ablated versions: removing the KL divergence term (Del KL) and replacing the original GRPO with Dr. GRPO. Results are reported on MVBench, Video-MME (without subtitle input), MLVU, and MMVU (multiple-choice subset). Bold values indicate the best performance for each benchmark." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 342, + 504, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 386 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 386 + ], + "type": "text", + "content": "moment\". Additionally, we present a series of experimental findings, hoping this work will provide valuable insights for future practitioners exploring the video reasoning abilities of small-scale models. We will further investigate small-scale video reasoning models, with potential future directions as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 132, + 396, + 504, + 479 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 132, + 396, + 504, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 396, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 132, + 396, + 504, + 430 + ], + "type": "text", + "content": "- Introducing high-quality video reasoning data. Currently, TinyLLaVA-Video-R1 is trained only on general video question-answering data. We aim to explore the upper limits of the model's reasoning capabilities by introducing higher-quality video reasoning data." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 434, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 434, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 132, + 434, + 504, + 479 + ], + "type": "text", + "content": "- Improving reinforcement learning algorithms. Currently, TinyLLaVA-Video-R1 employs the GRPO algorithm for training. However, this approach exhibits notable limitations. To enhance its effectiveness in video reasoning tasks, we plan to refine the algorithm by addressing the key challenges observed in our experiment." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 491, + 504, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 491, + 504, + 533 + ], + "spans": [ + { + "bbox": [ + 104, + 491, + 504, + 533 + ], + "type": "text", + "content": "Acknowledgment. This work was partially supported by the National Science and Technology Major Project (Grant No. 2022ZD0116310), National Natural Science Foundation of China (Grant No. 62476016), the Fundamental Research Funds for the Central Universities." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 549, + 164, + 561 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 549, + 164, + 561 + ], + "spans": [ + { + "bbox": [ + 106, + 549, + 164, + 561 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 110, + 567, + 505, + 722 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 110, + 567, + 505, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 567, + 505, + 599 + ], + "spans": [ + { + "bbox": [ + 110, + 567, + 505, + 599 + ], + "type": "text", + "content": "[1] Liang Chen, Lei Li, Haozhe Zhao, Yifan Song, and Vinci. R1-v: Reinforcing super generalization ability in vision-language models with less than $3. https://github.com/Deep-Agent/R1-V, 2025. Accessed: 2025-02-02. 1, 2, 3" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 110, + 609, + 505, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 609, + 505, + 640 + ], + "spans": [ + { + "bbox": [ + 110, + 609, + 505, + 640 + ], + "type": "text", + "content": "[2] Lin Chen, Xilin Wei, Jinsong Li, Xiaoyi Dong, Pan Zhang, Yuhang Zang, Zehui Chen, Haodong Duan, Bin Lin, Zhenyu Tang, et al. Sharegpt4video: Improving video understanding and generation with better captions. arXiv preprint arXiv:2406.04325, 2024. 5" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 110, + 650, + 505, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 650, + 505, + 680 + ], + "spans": [ + { + "bbox": [ + 110, + 650, + 505, + 680 + ], + "type": "text", + "content": "[3] Kaituo Feng, Kaixiong Gong, Bohao Li, Zonghao Guo, Yibing Wang, Tianshuo Peng, Benyou Wang, and Xiangyu Yue. Video-r1: Reinforcing video reasoning in mllms. arXiv preprint arXiv:2503.21776, 2025. 1, 2, 3" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 110, + 691, + 505, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 691, + 505, + 722 + ], + "spans": [ + { + "bbox": [ + 110, + 691, + 505, + 722 + ], + "type": "text", + "content": "[4] Chaoyou Fu, Yuhan Dai, Yongdong Luo, Lei Li, Shuhuai Ren, Renrui Zhang, Zihan Wang, Chenyu Zhou, Yunhang Shen, Mengdan Zhang, et al. Video-mme: The first-ever comprehensive evaluation benchmark of multi-modal llms in video analysis. arXiv preprint arXiv:2405.21075, 2024. 
6" + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 111, + 72, + 505, + 103 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 72, + 505, + 103 + ], + "spans": [ + { + "bbox": [ + 111, + 72, + 505, + 103 + ], + "type": "text", + "content": "[5] Xinyu Guan, Li Lyna Zhang, Yifei Liu, Ning Shang, Youran Sun, Yi Zhu, Fan Yang, and Mao Yang. rstar-math: Small llms can master math reasoning with self-evolved deep thinking. arXiv preprint arXiv:2501.04519, 2025. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 111, + 110, + 504, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 110, + 504, + 142 + ], + "spans": [ + { + "bbox": [ + 111, + 110, + 504, + 142 + ], + "type": "text", + "content": "[6] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. 1, 2" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 148, + 504, + 179 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 148, + 504, + 179 + ], + "spans": [ + { + "bbox": [ + 111, + 148, + 504, + 179 + ], + "type": "text", + "content": "[7] Wenxuan Huang, Bohan Jia, Zijie Zhai, Shaosheng Cao, Zheyu Ye, Fei Zhao, Yao Hu, and Shaohui Lin. Vision-r1: Incentivizing reasoning capability in multimodal large language models. arXiv preprint arXiv:2503.06749, 2025. 1, 2" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 186, + 503, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 186, + 503, + 217 + ], + "spans": [ + { + "bbox": [ + 111, + 186, + 503, + 217 + ], + "type": "text", + "content": "[8] Binyuan Hui, Jian Yang, Zeyu Cui, Jiaxi Yang, Dayiheng Liu, Lei Zhang, Tianyu Liu, Jiajun Zhang, Bowen Yu, Keming Lu, et al. Qwen2. 5-coder technical report. arXiv preprint arXiv:2409.12186, 2024.3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 223, + 505, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 223, + 505, + 255 + ], + "spans": [ + { + "bbox": [ + 111, + 223, + 505, + 255 + ], + "type": "text", + "content": "[9] Kunchang Li, Yali Wang, Yinan He, Yizhuo Li, Yi Wang, Yi Liu, Zun Wang, Jilan Xu, Guo Chen, Ping Luo, et al. Mybench: A comprehensive multi-modal video understanding benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 22195-22206, 2024. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 261, + 504, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 261, + 504, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 261, + 504, + 282 + ], + "type": "text", + "content": "[10] Yanwei Li, Chengyao Wang, and Jiaya Jia. Llama-vid: An image is worth 2 tokens in large language models. In European Conference on Computer Vision, pages 323–340. 
Springer, 2025. 5" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 289, + 504, + 310 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 289, + 504, + 310 + ], + "spans": [ + { + "bbox": [ + 105, + 289, + 504, + 310 + ], + "type": "text", + "content": "[11] Bin Lin, Yang Ye, Bin Zhu, Jiaxi Cui, Munan Ning, Peng Jin, and Li Yuan. Video-llava: Learning united visual representation by alignment before projection. arXiv preprint arXiv:2311.10122, 2023. 5" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 316, + 504, + 338 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 316, + 504, + 338 + ], + "spans": [ + { + "bbox": [ + 105, + 316, + 504, + 338 + ], + "type": "text", + "content": "[12] Haotian Liu, Chunyuan Li, Yuheng Li, Bo Li, Yuanhan Zhang, Sheng Shen, and Yong Jae Lee. Llavanext: Improved reasoning,OCR, and world knowledge, 2024.5" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 344, + 504, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 344, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 105, + 344, + 504, + 365 + ], + "type": "text", + "content": "[13] Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783, 2025. 2, 7" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 372, + 504, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 372, + 504, + 393 + ], + "spans": [ + { + "bbox": [ + 105, + 372, + 504, + 393 + ], + "type": "text", + "content": "[14] Ziyu Liu, Zeyi Sun, Yuhang Zang, Xiaoyi Dong, Yuhang Cao, Haodong Duan, Dahua Lin, and Jiaqi Wang. Visual-rft: Visual reinforcement fine-tuning. arXiv preprint arXiv:2503.01785, 2025. 1" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 399, + 504, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 399, + 504, + 430 + ], + "spans": [ + { + "bbox": [ + 105, + 399, + 504, + 430 + ], + "type": "text", + "content": "[15] Fanqing Meng, Lingxiao Du, Zongkai Liu, Zhixiang Zhou, Quanfeng Lu, Daocheng Fu, Botian Shi, Wenhai Wang, Junjun He, Kaipeng Zhang, et al. Mm-eureka: Exploring visual aha moment with rule-based large-scale reinforcement learning. arXiv preprint arXiv:2503.07365, 2025. 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 437, + 504, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 437, + 504, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 437, + 504, + 468 + ], + "type": "text", + "content": "[16] Yingzhe Peng, Gongrui Zhang, Miaosen Zhang, Zhiyuan You, Jie Liu, Qipeng Zhu, Kai Yang, Xingzhong Xu, Xin Geng, and Xu Yang. Lmm-r1: Empowering 3b Imms with strong reasoning abilities through two-stage rule-based rl. arXiv preprint arXiv:2503.07536, 2025. 1, 2, 3" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 475, + 504, + 506 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 475, + 504, + 506 + ], + "spans": [ + { + "bbox": [ + 105, + 475, + 504, + 506 + ], + "type": "text", + "content": "[17] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 
2, 3" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 105, + 512, + 504, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 512, + 504, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 512, + 504, + 544 + ], + "type": "text", + "content": "[18] Haozhan Shen, Zilun Zhang, Kangjia Zhao, Qianqian Zhang, Ruochen Xu, and Tiancheng Zhao. Vlm-r1: A stable and generalizable r1-style large vision-language model. https://github.com/om-ai-lab/VLM-R1, 2025. Accessed: 2025-02-15. 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 550, + 504, + 582 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 550, + 504, + 582 + ], + "spans": [ + { + "bbox": [ + 105, + 550, + 504, + 582 + ], + "type": "text", + "content": "[19] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. 2" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 105, + 587, + 504, + 619 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 587, + 504, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 587, + 504, + 619 + ], + "type": "text", + "content": "[20] Peng Wang, Shuai Bai, Sinan Tan, Shijie Wang, Zhihao Fan, Jinze Bai, Keqin Chen, Xuejing Liu, Jialin Wang, Wenbin Ge, et al. Qwen2-vl: Enhancing vision-language model's perception of the world at any resolution. arXiv preprint arXiv:2409.12191, 2024. 7" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 625, + 541, + 646 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 625, + 541, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 625, + 541, + 646 + ], + "type": "text", + "content": "[21] Xiaodong Wang and Peixi Peng. Open-r1-video. https://github.com/Wang-Xiaodong1899/Open-R1-Video, 2025.1" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 653, + 504, + 685 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 653, + 504, + 685 + ], + "spans": [ + { + "bbox": [ + 105, + 653, + 504, + 685 + ], + "type": "text", + "content": "[22] Yi Wang, Kunchang Li, Xinhao Li, Jiashuo Yu, Yinan He, Chenting Wang, Guo Chen, Baoqi Pei, Rongkun Zheng, Jilan Xu, Zun Wang, et al. Intervideo2: Scaling video foundation models for multimodal video understanding. arXiv preprint arXiv:2403.15377, 2024. 5" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 105, + 691, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 691, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 105, + 691, + 504, + 723 + ], + "type": "text", + "content": "[23] Junbin Xiao, Xindi Shang, Angela Yao, and Tat-Seng Chua. Next-qa: Next phase of question-answering to explaining temporal actions. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 9777–9786, 2021. 
4" + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 505, + 339 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 105 + ], + "type": "text", + "content": "[24] Qiying Yu, Zheng Zhang, Ruofei Zhu, Yufeng Yuan, Xiaochen Zuo, Yu Yue, Tiantian Fan, Gaohong Liu, Lingjun Liu, Xin Liu, et al. Dapo: An open-source llm reinforcement learning system at scale. arXiv preprint arXiv:2503.14476, 2025. 2, 4, 7" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 110, + 505, + 142 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 110, + 505, + 142 + ], + "spans": [ + { + "bbox": [ + 106, + 110, + 505, + 142 + ], + "type": "text", + "content": "[25] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 11975-11986, 2023. 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 148, + 504, + 170 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 148, + 504, + 170 + ], + "spans": [ + { + "bbox": [ + 107, + 148, + 504, + 170 + ], + "type": "text", + "content": "[26] Shaolei Zhang, Qingkai Fang, Zhe Yang, and Yang Feng. Llava-mini: Efficient image and video large multimodal models with one vision token. arXiv preprint arXiv:2501.03895, 2025. 5" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 176, + 504, + 207 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 176, + 504, + 207 + ], + "spans": [ + { + "bbox": [ + 106, + 176, + 504, + 207 + ], + "type": "text", + "content": "[27] Xingjian Zhang, Xi Weng, Yihao Yue, Zhaoxin Fan, Wenjun Wu, and Lei Huang. Tinyllava-video: A simple framework of small-scale large multimodal models for video understanding. arXiv preprint arXiv:2501.15513, 2025. 1, 2, 3" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 213, + 504, + 236 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 213, + 504, + 236 + ], + "spans": [ + { + "bbox": [ + 105, + 213, + 504, + 236 + ], + "type": "text", + "content": "[28] Yuanhan Zhang, Jinming Wu, Wei Li, Bo Li, Zejun Ma, Ziwei Liu, and Chunyuan Li. Video instruction tuning with synthetic data. arXiv preprint arXiv:2410.02713, 2024. 4" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 241, + 504, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 241, + 504, + 274 + ], + "spans": [ + { + "bbox": [ + 106, + 241, + 504, + 274 + ], + "type": "text", + "content": "[29] Yilun Zhao, Lujing Xie, Haowei Zhang, Guo Gan, Yitao Long, Zhiyuan Hu, Tongyan Hu, Weiyuan Chen, Chuhan Li, Junyang Song, et al. Mmvu: Measuring expert-level multi-discipline video understanding. arXiv preprint arXiv:2501.12380, 2025. 
6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 279, + 504, + 302 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 279, + 504, + 302 + ], + "spans": [ + { + "bbox": [ + 105, + 279, + 504, + 302 + ], + "type": "text", + "content": "[30] Hengguang Zhou, Xinui Li, Ruochen Wang, Minhao Cheng, Tianyi Zhou, and Cho-Jui Hsieh. R1-zero's\" aha moment\" in visual reasoning on a 2b non-sft model. arXiv preprint arXiv:2503.05132, 2025. 1, 2, 3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 307, + 504, + 339 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 307, + 504, + 339 + ], + "spans": [ + { + "bbox": [ + 105, + 307, + 504, + 339 + ], + "type": "text", + "content": "[31] Junjie Zhou, Yan Shu, Bo Zhao, Boya Wu, Shitao Xiao, Xi Yang, Yongping Xiong, Bo Zhang, Tiejun Huang, and Zheng Liu. Mlvu: A comprehensive benchmark for multi-task long video understanding. arXiv preprint arXiv:2406.04264, 2024. 6" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_content_list.json b/data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..5a6fed220a78d527efc3098d50b91b131bc7566e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_content_list.json @@ -0,0 +1,2144 @@ +[ + { + "type": "text", + "text": "SegEarth-R1: Geospatial Pixel Reasoning via Large Language Model", + "text_level": 1, + "bbox": [ + 184, + 122, + 816, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kaiyu Li $^{1,\\ast}$ , Zepeng Xin $^{1,\\ast}$ , Li Pang $^{1}$ , Chao Pang $^{2}$ , Yupeng Deng $^{3}$ , Jing Yao $^{3}$ , Guisong Xia $^{2}$ , Deyu Meng $^{1}$ , Zhi Wang $^{1}$ , Xiangyong Cao $^{1,\\dagger}$ , Xi'an Jiaotong University", + "bbox": [ + 212, + 207, + 782, + 253 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 272, + 537, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Remote sensing has become critical for understanding environmental dynamics, urban planning, and disaster management. However, traditional remote sensing workflows often rely on explicit segmentation or detection methods, which struggle to handle complex, implicit queries that require reasoning over spatial context, domain knowledge, and implicit user intent. Motivated by this, we introduce a new task, i.e., geospatial pixel reasoning, which allows implicit querying and reasoning and generates the mask of the target region. To advance this task, we construct and release the first large-scale benchmark dataset called EarthReason, which comprises 5,434 manually annotated image masks with over 30,000 implicit question-answer pairs. 
Moreover, we propose SegEarth-R1, a simple yet effective language-guided segmentation baseline that integrates a hierarchical visual encoder, a large language model (LLM) for instruction parsing, and a tailored mask generator for spatial correlation. The design of SegEarth-R1 incorporates domain-specific adaptations, including aggressive visual token compression to handle ultra-high-resolution remote sensing images, a description projection module to fuse language and multi-scale features, and a streamlined mask prediction pipeline that directly queries description embeddings. Extensive experiments demonstrate that SegEarth-R1 achieves state-of-the-art performance on both reasoning and referring segmentation tasks, significantly outperforming traditional and LLM-based segmentation methods. Our data and code will be released at https://github.com/earth-insights/SegEarth-R1.", + "bbox": [ + 228, + 301, + 769, + 593 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 621, + 313, + 636 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Earth observation through remote sensing has emerged as a cornerstone of modern geospatial analysis, enabling unprecedented insights into environmental dynamics, urban planning, and disaster management [56, 45]. Satellite and aerial images provide a unique vantage point for monitoring planetary-scale phenomena, ranging from deforestation patterns to coastal erosion. However, converting this raw pixel data into actionable insights requires more than traditional computer vision techniques; it demands models capable of reasoning about spatial context, domain knowledge, and implicit user intent. Conventional remote sensing workflows predominantly rely on explicit tasks, e.g., semantic segmentation and referring segmentation [44, 8, 85], which operate within fixed taxonomies and require precise user instructions. While effective for well-defined scenarios, these approaches struggle to accommodate complex, implicit queries—for example, identifying regions at elevated risk of landslides based on slope, vegetation cover, and proximity to infrastructure. Such tasks limit implicit reasoning over heterogeneous spatial patterns, object relationships, and environmental metadata, exceeding the capabilities of standard segmentation or detection pipelines.", + "bbox": [ + 169, + 651, + 826, + 832 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Motivated by this, we introduce a new task, i.e., geospatial pixel reasoning, which allows implicit querying and reasoning and generates the mask of the target region. To enable research in this task,", + "bbox": [ + 169, + 837, + 826, + 867 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09644v1 [cs.CV] 13 Apr 2025", + "bbox": [ + 22, + 262, + 60, + 708 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution", + "bbox": [ + 189, + 875, + 315, + 887 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "†Corresponding author: caoxiangyong@mail.xjtu.edu.cn", + "bbox": [ + 192, + 888, + 531, + 902 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint. 
Under review.", + "bbox": [ + 171, + 922, + 315, + 936 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/e85cefccaf9ec3cf9d9cebefeda3c3348534e23088620d9eea2f0a676884b032.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 94, + 320, + 212 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/546e8773c1096cb376d642a5780c930b2168aa5bb02a7d60ca5a69f2fe46b2bf.jpg", + "image_caption": [ + "A tennis court on the far left", + "Figure 1: Comparison of semantic segmentation, referring segmentation and geospatial pixel inference. (left) Samples from the LoveDA [67] and RRSIS-D [40] datasets. (right) Samples from the EarthReason dataset. Previous tasks are limited by fixed taxonomies and explicit instructions, while geospatial pixel reasoning supports complex implicit instructions and requires the reasoning capability of the model." + ], + "image_footnote": [], + "bbox": [ + 181, + 213, + 321, + 329 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/e1edabbdc7f67bf6368fb9df675d9fff192ba33bc1b09dd692c8978388be11ca.jpg", + "image_caption": [ + "Geospatial Pixel Reasoning" + ], + "image_footnote": [ + "USER: When there is an urgent necessity for emergency medical services, what designated location facilitates rapid air evacuations for patients?", + "ASSISTANT: Perched atop the medical complex, the airport helipad facilitates swift helicopter evacuations for patients in critical condition. This strategic location offers immediate access to vital healthcare services, ensuring timely assistance in emergencies." + ], + "bbox": [ + 336, + 112, + 813, + 294 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "we build and release the first large-scale benchmark dataset, called EarthReason, which contains 5,434 manually annotated remote sensing image-mask pairs drawn from diverse classification sources, covering 28 scene categories at spatial resolutions ranging from $0.5\\mathrm{m}$ to $153\\mathrm{m}$ . Each image is paired with multiple implicit reasoning questions that require the model to infer target masks based on contextual and domain-specific knowledge, rather than explicit object names. In addition, by incorporating empty target cases and varying spatial scales, EarthReason pushes models to generalize across complex real-world remote sensing scenarios.", + "bbox": [ + 169, + 421, + 823, + 518 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Recent progress in multimodal large language models (MLLMs) has demonstrated impressive performance in natural image domains, where models like LISA [26] and PixelLM [55] leverage large language models (LLMs) [62, 7, 79] to interpret rich textual prompts and generate pixel-level outputs. These frameworks excel at tasks such as reasoning segmentation [26], where the target mask is not directly specified but must be inferred from nuanced language cues. Unfortunately, directly transferring these methods to geospatial pixel reasoning is non-trivial since remote sensing images present extreme scale variation, densely packed small-scale objects and ultra-high resolution that violate assumptions of natural images. Moreover, different from natural images, remote sensing queries often require spatial correlations. 
For instance, identifying \"informal settlements\" relies on detecting roof material irregularities, road network fragmentation, and spatial adjacency to legal land-use zones.", + "bbox": [ + 169, + 525, + 823, + 676 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To address these challenges, we present SegEarth-R1, a simple yet effective language-guided segmentation model that integrates a hierarchical visual encoder, an LLM for instruction parsing, and a tailored mask generator designed for spatial correlation. Further, some components are also designed to adapt to the characteristics of remote sensing images. Specifically, we propose the aggressive visual token compression to handle ultra-high-resolution images, a description projection module to fuse language and multi-scale features, and a streamlined mask prediction pipeline that directly queries description embeddings. Despite its architectural simplicity, SegEarth-R1 achieves advanced performance on EarthReason and referring segmentation datasets, significantly outperforming both traditional and LLM-based segmentation methods.", + "bbox": [ + 169, + 683, + 823, + 808 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In summary, our contributions are as follows:", + "bbox": [ + 171, + 815, + 472, + 829 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce the geospatial pixel reasoning task, which requires models to infer segmentation masks from implicit natural language queries by reasoning over spatial context and domain knowledge.", + "- We build and release the first large-scale benchmark with 5,434 image-mask pairs, 28 categories, and over 30,000 implicit question-answer pairs, fostering research in geospatial pixel reasoning." + ], + "bbox": [ + 169, + 844, + 823, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We propose an LLM-based segmentation model, SegEarth-R1, which incorporates new segmentation capabilities in remote sensing, containing several domain-specific designs.", + "- Extensive experiments show that SegEarth-R1 achieves state-of-the-art performance on reasoning and referring segmentation tasks, compared to traditional methods and other LLM-based methods." + ], + "bbox": [ + 169, + 90, + 823, + 165 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 171, + 199, + 321, + 214 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.1 Referring Segmentation", + "text_level": 1, + "bbox": [ + 171, + 238, + 380, + 253 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Referring segmentation aims to segment targets in an image based on natural language descriptions, requiring precise alignment between linguistic expressions and visual content. Early approaches adopted CNN-RNN/LSTM frameworks [18, 36, 32, 47, 60, 21] to extract visual features and encode textual queries, respectively. However, these methods struggled with complex expressions due to limited local receptive fields and insufficient cross-modal interaction [24]. To address these limitations, attention mechanisms [63] emerged as a pivotal technique [10, 81, 72, 19, 77, 49, 74, 58]. VLT [10] dynamically generates adaptive query vectors based on image-text interactions, enabling precise localization through cross-modal attention. 
LAVT [81] further advances this paradigm by integrating hierarchical visual-linguistic fusion within a Swin Transformer [43] backbone, where pixel-word attention refines multiscale features to achieve fine-grained semantic alignment. In remote sensing, specifying segmentation for certain instances can improve interpretation efficiency and user interactivity. Recently, Yuan et al. [85] introduced referring segmentation into satellite images for the first time. Subsequently, following the LAVT [81] architecture, RMSIN [40] also incorporated adaptive rotated convolutions to address scale and orientation variations. FIANet [28] and CroBIM [12] introduced elaborate cross-modal interactions for feature alignment. RSSep [17] reformulated referring segmentation as a sequence-to-sequence task, predicting polygonal boundaries to handle scale variations and blurred edges [38]. However, existing methods effectively follow explicit instructions for target segmentation but lack implicit intent reasoning. In this paper, the proposed geospatial pixel reasoning task advances beyond referring segmentation by employing LLMs' reasoning capabilities to interpret subtle instructions and accurately segment desired targets.", + "bbox": [ + 169, + 268, + 826, + 547 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2.2 LLM-based Segmentation", + "text_level": 1, + "bbox": [ + 171, + 575, + 393, + 590 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Recent advances in LLMs have significantly expanded their capabilities to integrate pixel-level segmentation with language reasoning [76, 68, 73, 2, 61, 88, 84, 16]. For instance, Florence-2 [76] unified text, detection, and segmentation through a sequence-to-sequence framework with task instructions. To address the complexity of real-world segmentation scenarios, some works focus on architectural specialization and instruction-aware adaptation. LISA [26, 80] established the paradigm by introducing a [SEG] token to connect LLMs with segmentation decoders like SAM [25], enabling language-guided mask prediction. Subsequent studies enhanced this paradigm: GSVA [75] introduced shared-weight [SEG] tokens and [REJ] tokens for multi-target and empty-target handling [35, 55, 90], while GLaMM [53] achieved pixel-grounded conversational capabilities through holistic segmentation [94]. Parallel efforts focused on architectural unification - PSALM [92] established a flexible input schema for multi-task segmentation, and OMG-LLaVA [89] combined universal segmentation backbones with LLMs for pixel-level reasoning. Video understanding extensions emerged through VISA [78] and InstructSeg [71], which integrated temporal reasoning. Notably, Text4Seg [27] redefined segmentation as a text generation problem using semantic descriptors, eliminating the need for an additional decoder. In remote sensing, benefiting from the above paradigms [26, 27], some unified models such as RSUniVLM [42], GeoGround [95] and GeoPix [50] are equipped with segmentation capabilities. Although based on LLM, these models focus only on explicit text-guided segmentation. Further, GeoPixel [57] introduced grounded conversation generation [53] to remote sensing, but it still does not provide reasoning capability. Our SegEarth-R1 also follows the LLM-based segmentation paradigm, but is different from previous methods. 
Specifically, SegEarth-R1 is the first work to support reasoning about the target region from implicit queries, and its components are specifically designed for the challenges in remote sensing.", + "bbox": [ + 169, + 607, + 826, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/07ead8f5007da4af1336a18007e5e0291b9497abe37e8cec54c29f8a7d7560f2.jpg", + "table_caption": [ + "Table 1: Comparison between EarthReason and other related datasets. The gray rendering denotes the natural image dataset. \"Seg\", \"Det\", \"VG\", \"Cls\" denote segmentation, detection, visual grounding and classification datasets, respectively." + ], + "table_footnote": [], + "table_body": "
Dataset | Mask Label | Reasoning Query | Spatial resolution | Image Size | Image Num | Image Source | Class Num
ReasonSeg [26] | ✓ | ✓ | - | - | 1,218 | OpenImages (Seg) & ScanNetv2 (Seg) | -
LLM-Seg40K [65] | ✓ | ✓ | - | - | 14,000 | LVIS (Seg) & EgoObjects (Seg) | -
EarthVQA [66] | ✗ | ✓ | 0.3m | 1024² | 6,000 | LoveDA (Seg) | 14
RefSegRS [85] | ✓ | ✗ | 0.5m-30m | 800² | 4,420 | SkyScapes (Seg) | 14
RRSIS-D [40] | ✓ | ✗ | 0.13m | 512² | 17,402 | RSVGD (VG) & DIOR (OD) | 20
RISBench [12] | ✓ | ✗ | 0.1m-30m | 512² | 52,472 | DOTAv2 (OD) & DIOR (OD) | 26
EarthReason | ✓ | ✓ | 0.5m-153m | 1232-7617 | 5,434 | AID (Cls) & fMoW (Cls) | 28
", + "bbox": [ + 183, + 138, + 818, + 250 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Benchmark Geospatial Pixel Reasoning Dataset—EarthReason", + "text_level": 1, + "bbox": [ + 169, + 273, + 732, + 290 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 Comparison with Related Dataset", + "text_level": 1, + "bbox": [ + 171, + 305, + 449, + 320 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We analyze three types of tasks and datasets related to geospatial pixel reasoning, i.e., natural image reasoning segmentation, remote sensing visual question answering (VQA), and remote sensing referring segmentation, as shown in Table 1. RefSegRS [85] and RRSIS-D [40] provide early benchmarks with image-text-mask triplets. RISBench [12], the largest RRSIS dataset to date, introduced 52,472 triplets with oriented bounding boxes and pixel-level masks generated via a semi-automatic pipeline. These datasets address the limitations of earlier text-focused datasets (e.g., RSICD [46], EarthVQA [66], etc.) and enable comprehensive evaluation of multimodal models. Compared to the previous referring segmentation datasets, our EarthReason datasets has the following features: (1) The mask labels in EarthReason are not explicitly specified by the query, but require further reasoning to determine the target, which challenges the model's reasoning ability. (2) EarthReason uses a more raw data source. The previous related datasets directly transform existing segmentation datasets [1, 67] or SAM-processed detection datasets [86, 31, 11], while our EarthReason uses images from classification datasets [44, 8] and we manually annotate them. This allows EarthReason to provide more data gain when it comes to co-training of unified segmentation tasks. (3) EarthReason has more diverse spatial resolutions and image sizes, which are conducive to solving the object scale spanning problem inherent in remote sensing images [56]. Compared to the first natural image reasoning segmentation dataset, ReasonSeg, EarthReason contains $4.46 \\times$ more data than it. Therefore, we believe that EarthReason, as the first geospatial pixel reasoning dataset in the remote sensing area, is capable of performing initial explorations of this task.", + "bbox": [ + 169, + 330, + 826, + 594 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Dataset Generation Pipeline", + "text_level": 1, + "bbox": [ + 171, + 609, + 410, + 626 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our benchmark dataset EarthReason is generated according to the following three steps, i.e., image collection, question-answer pair generation, and object mask labeling.", + "bbox": [ + 169, + 636, + 823, + 665 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Image Collection. As mentioned above, to avoid potential data leakage in the future construction of unified segmentation models for remote sensing, we collect images from existing classification data. Although this increases the annotation cost, it also motivates more diverse scenes. Specifically, we first select the 28 categories that are more suitable for reasoning in the Million-AID [44] dataset, and sample about 200 images for each category. Then, we find that the actual geographic range contained in Million-AID's images is limited. Thus, we also collect 800 images in the fMoW [8] dataset to enhance the model's reasoning ability in complex scenes. 
Further, to alleviate the factitious illusion issue [51], we add an extra 200 empty target images (i.e., the implied target is not in the image). Finally, some low-quality images are eliminated, and we obtain a total of 5,434 images.", + "bbox": [ + 169, + 671, + 826, + 796 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Question-Answer Pair Generation. We use GPT-4o1 to construct question-answer pairs, and given its excellent visual comprehension, we take the remote sensing image and the corresponding scene category (provided by Million-AID and fMoW) as part of the prompt to generate questions and answers that are closely related to the image. An example of such a prompt is illustrated in Appendix A.1. In addition, following [26], to make the questions and answers diverse, we adapt GPT-3.5 to rephrase the instructional questions and answers, as shown in Appendix Figure 7.", + "bbox": [ + 169, + 801, + 823, + 886 + ], + "page_idx": 3 + }, + { + "type": "page_footnote", + "text": "1https://platform.openai.com/docs/models/gpt-4o", + "bbox": [ + 192, + 897, + 558, + 911 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/f68c75612d877479ee4e3befd34d3f1b245924c81902e10e0783026023a4668c.jpg", + "image_caption": [ + "Figure 2: Overview of the proposed SegEarth-R1 architecture. Given an image $X_{v}$ and a text description $X_{q}$ , a hierarchical visual encoder and a proposed connector are used to extract and compress visual tokens. Then, the visual tokens $\\square$ and description embeddings $\\square$ are fed into an LLM for instruction interpretation and semantic correlation. Finally, description embeddings are directly mapped to the query vector and used for spatial correlation and segmentation mask generation." + ], + "image_footnote": [], + "bbox": [ + 174, + 92, + 823, + 258 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Object Mask Labeling. Different from previous referring and reasoning segmentation datasets (which use off-the-shelf masks or bounding boxes), we annotate images from scratch. Specifically, we employ multiple experts in remote sensing and vision, assign each expert a few hundred images to annotate, and cross-validate the annotations after they are completed. For simple targets (e.g., lake), SAM-H [25] is used to assist in annotation; for complex targets (e.g., wind turbine), each point of the polygon is finely marked. A description of mask quality is provided in Appendix A.1.", + "bbox": [ + 169, + 353, + 826, + 438 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Dataset Statistics. The EarthReason dataset is partitioned into training, validation, and testing sets, comprising 2,371, 1,135, and 1,928 images, respectively. In the training set, each image is annotated with an average of six questions and three corresponding answers. The average question length is 20.86 words, while the average answer length is 26.76 words. To assess the model's generalization capability, several semantic categories are deliberately reserved for the validation and test sets, ensuring they remain unseen during training. 
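Concretely, one training record can be pictured as the following hypothetical Python sketch; the field names are illustrative assumptions rather than the released annotation schema, and the example question and answer echo the Appendix A.1 sample.

```python
# Hypothetical shape of one EarthReason training record, mirroring the
# statistics above (about six question rephrasings and three answer variants
# per image, plus a pixel mask). Field names are illustrative assumptions,
# not the released annotation schema.
sample = {
    "image": "images/example_scene.jpg",     # RGB image sampled from AID / fMoW
    "mask": "masks/example_scene.png",       # binary target mask (all zeros for empty-target cases)
    "scene_category": "ground track field",  # one of the 28 scene categories
    "questions": [                           # implicit reasoning queries (about six per image)
        "If an earthquake were to occur, what is the most secure area to assemble "
        "numerous individuals that also facilitates emergency service access?",
        # ... further rephrasings produced with GPT-3.5
    ],
    "answers": [                             # about three answer variants naming the target
        "The ground track field stands out as the most secure location, thanks to its "
        "vast open area and close accessibility to primary roads.",
        # ... further variants
    ],
    "split": "train",                        # train / val / test: 2,371 / 1,135 / 1,928 images
}
```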
Additional dataset details are provided in the Appendix A.2.", + "bbox": [ + 169, + 443, + 825, + 541 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Baseline Geospatial Pixel Reasoning Method—SegEarth-R1", + "text_level": 1, + "bbox": [ + 169, + 569, + 705, + 587 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Compared with natural images, remote sensing images exhibit distinctive characteristics that demand specialized architectural designs for pixel-wise geospatial reasoning. In this work, we propose SegEarth-R1, a simple yet powerful baseline for geospatial pixel reasoning that effectively harnesses LLM capabilities while incorporating domain-specific adaptations. As illustrated in Figure 2, our architecture comprises three core parts: A visual encoder for image feature extraction, an LLM for instruction interpretation and semantic correlation, and a mask generator for spatial correlation and mask prediction. Each part incorporates critical design considerations to address the unique challenges of remote sensing images.", + "bbox": [ + 169, + 606, + 823, + 717 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Hierarchical Visual Encoder", + "text_level": 1, + "bbox": [ + 171, + 743, + 413, + 757 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Satellite and aerial targets present two critical challenges: (1) extreme scale variations ranging from sub-meter objects to kilometer-scale geographical formations [56], and (2) densely distributed small objects requiring high-resolution analysis [30]. Conventional ViT-based encoders adopted in MLLMs [26, 80, 25, 75] (e.g., image encoder in CLIP [52] and SAM [25, 54]) prove suboptimal due to their fixed-scale feature extraction and information compression through aggressive patch merging. To alleviate these limitations, following [92], SegEarth-R1 employs a Swin Transformer [43] backbone enhanced with progressive feature hierarchy construction. This architecture generates multi-scale feature maps $v_{h}, h \\in [1,4]$ at $1/4, 1/8, 1/16, 1/32$ of the original resolution through controlled downsampling operations, preserving high-resolution details for small objects while capturing contextual semantics at deeper layers.", + "bbox": [ + 169, + 772, + 825, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 Large Language Model and Input Schema", + "text_level": 1, + "bbox": [ + 171, + 90, + 509, + 107 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "SegEarth-R1 adopts the MLLM paradigm [37, 29] by jointly embedding visual tokens and textual instructions into a unified LLM input space for multimodal reasoning. Unlike natural images, remote sensing data exhibits ultra-high-resolution coverage [23, 64], posing computational challenges when processed through billion-level LLMs. Therefore, we expect to compress the visual token to alleviate the computational cost and make only simple semantic correlations in LLM.", + "bbox": [ + 169, + 116, + 823, + 186 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.2.1 Visual Token", + "text_level": 1, + "bbox": [ + 171, + 200, + 316, + 214 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Redundancy Analysis. Image redundancy quantifies the proportion of compressible, non-informative data within an image. 
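Before quantifying this redundancy, the multi-scale hierarchy described in Section 4.1 can be pictured with a small PyTorch sketch. The strided convolutions and channel widths below are toy stand-ins chosen only to reproduce the 1/4 to 1/32 shape pyramid, not the actual Swin-B layers used by SegEarth-R1:

```python
# Toy illustration of the Section 4.1 feature hierarchy: four maps at 1/4,
# 1/8, 1/16 and 1/32 of the input resolution. Plain strided convolutions and
# made-up channel widths stand in for the Swin-B backbone; only the shapes
# are the point here.
import torch
import torch.nn as nn


class ToyHierarchicalEncoder(nn.Module):
    def __init__(self, widths=(96, 192, 384, 768)):
        super().__init__()
        self.stem = nn.Conv2d(3, widths[0], kernel_size=4, stride=4)      # 1/4 resolution
        self.stages = nn.ModuleList([
            nn.Conv2d(widths[i], widths[i + 1], kernel_size=2, stride=2)  # halve H and W
            for i in range(3)
        ])

    def forward(self, x):
        feats = [self.stem(x)]
        for stage in self.stages:
            feats.append(stage(feats[-1]))
        return feats  # v_1 .. v_4 at 1/4, 1/8, 1/16, 1/32 of the input size


if __name__ == "__main__":
    x = torch.randn(1, 3, 1024, 1024)                 # inputs are resized to 1024 x 1024
    v1, v2, v3, v4 = ToyHierarchicalEncoder()(x)
    print([tuple(v.shape[-2:]) for v in (v1, v2, v3, v4)])
    # [(256, 256), (128, 128), (64, 64), (32, 32)]; the 1/32 map alone already
    # corresponds to 32 * 32 = 1024 visual tokens, which the token-compression
    # connector of Section 4.2.1 reduces further before they reach the LLM.
```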
To investigate the feasibility of aggressive visual token compression for remote sensing images, we conduct a redundancy analysis from dual perspectives: pixel-level statistical redundancy and spatial structural redundancy.", + "bbox": [ + 169, + 224, + 823, + 281 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "- According to information theory [59], entropy measures the average uncertainty or information content of an image, while the maximum entropy corresponds to the idealized scenario where pixel values are uniformly distributed (i.e., no redundancy). Thus, from the entropy perspective, the image redundancy can be defined as [14]:", + "bbox": [ + 169, + 292, + 823, + 349 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nR _ {e} = 1 - \\frac {- \\sum_ {l = 0} ^ {L - 1} p (l) \\log_ {2} p (l)}{\\log_ {2} L}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 388, + 354, + 825, + 391 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $L$ denotes the number of distinct intensity levels (e.g., $L = 256$ for an 8-bit grayscale image), and $p(l)$ denotes the probability mass function of the pixel intensity value $l$ .", + "bbox": [ + 181, + 398, + 825, + 428 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "- Beyond pixel-level statistical redundancy, structural self-similarity reflects spatial redundancy caused by repetitive patterns (e.g., textures, geometric features). To quantify this, we leverage the Structural Similarity Index Matrix (SSIM) [70] to measure inter-patch similarity. For an image partitioned into $N$ patches, the SSIM matrix $\\mathbf{M} \\in \\mathbb{R}^{N \\times N}$ is defined as:", + "bbox": [ + 169, + 439, + 823, + 494 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {M} (i, j) = \\frac {(2 \\mu_ {i} \\mu_ {j} + C _ {1}) (2 \\sigma_ {i j} + C _ {2})}{(\\mu_ {i} ^ {2} + \\mu_ {j} ^ {2} + C _ {1}) (\\sigma_ {i} ^ {2} + \\sigma_ {j} ^ {2} + C _ {2})}, \\quad \\forall i, j \\in 1, \\dots , N \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 294, + 502, + 825, + 539 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "where $\\mu_{i},\\sigma_{i}$ denote the mean and variance of the $i$ -th patch, $\\sigma_{ij}$ is the covariance between patches $i$ and $j$ , and $C_1,C_2$ are stability constants. Then, the structural self-similarity redundancy $R_{s}$ is derived by averaging off-diagonal elements of $\\mathbf{M}$ :", + "bbox": [ + 181, + 544, + 823, + 585 + ], + "page_idx": 5 + }, + { + "type": "equation", + "text": "\n$$\nR _ {s} = \\frac {1}{N (N - 1)} \\sum_ {i \\neq j} \\mathbf {M} (i, j). \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 401, + 592, + 825, + 631 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We evaluate six benchmark datasets spanning natural images (COCO [3], ADE20K [93], PASCAL [13]) and remote sensing images (LoveDA [67], DeepGlobe [9], xBD [15]) for redundancy analysis. 
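A minimal NumPy/scikit-image sketch of these two scores is given below; the patch size, the SSIM implementation, and the synthetic demo input are illustrative assumptions rather than the exact measurement protocol used for Figure 3:

```python
# Sketch of the two redundancy scores in Eqs. (1)-(3), assuming an 8-bit
# grayscale image given as a NumPy array. Patch size and the SSIM backend
# (scikit-image) are illustrative choices, not the paper's exact protocol.
import numpy as np
from skimage.metrics import structural_similarity as ssim


def entropy_redundancy(img: np.ndarray, levels: int = 256) -> float:
    """R_e = 1 - H(img) / log2(L), Eq. (1)."""
    hist, _ = np.histogram(img, bins=levels, range=(0, levels))
    p = hist / hist.sum()
    p = p[p > 0]                              # drop empty bins to avoid log2(0)
    entropy = -(p * np.log2(p)).sum()
    return 1.0 - entropy / np.log2(levels)


def structural_redundancy(img: np.ndarray, patch: int = 64) -> float:
    """R_s = mean off-diagonal inter-patch SSIM, Eqs. (2)-(3)."""
    h, w = img.shape
    patches = [img[i:i + patch, j:j + patch]
               for i in range(0, h - patch + 1, patch)
               for j in range(0, w - patch + 1, patch)]
    n = len(patches)
    total = 0.0
    for i in range(n):
        for j in range(n):
            if i != j:
                total += ssim(patches[i], patches[j], data_range=255)
    return total / (n * (n - 1))


if __name__ == "__main__":
    demo = (np.random.rand(256, 256) * 255).astype(np.uint8)  # stand-in image
    print(f"R_e = {entropy_redundancy(demo):.3f}, R_s = {structural_redundancy(demo):.3f}")
```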
As shown in Figure 3, our analysis reveals two critical findings: 1) Remote sensing images demonstrate $1.9\\sim 3.3\\times$ higher entropic redundancy than natural images, indicating greater pixel-level", + "bbox": [ + 169, + 641, + 421, + 794 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/83befbd5344d600c990778748b796f239ff6028bc3f8f1dd0071afa497f815a3.jpg", + "image_caption": [ + "(a) pixel-level redundancy", + "(b) spatial structure redundancy" + ], + "image_footnote": [], + "bbox": [ + 442, + 652, + 624, + 734 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/fb419c92310842f501e960cafb412095b70d4f48b6db1426292df9d680961416.jpg", + "image_caption": [ + "Figure 3: Redundancy analysis of remote sensing datasets and natural images, and the former exhibits higher redundancy." + ], + "image_footnote": [], + "bbox": [ + 632, + 652, + 813, + 733 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "compressibility. 2) The average self-similarity for remote sensing data exceeds natural images by $42.6\\%$ , confirming the higher prevalence of repetitive textures and geometric patterns. This insight justifies aggressive token compression for semantic-level comprehension in remote sensing images.", + "bbox": [ + 169, + 794, + 823, + 837 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Token Compression Connector. In modern MLLM, connectors such as Q-Former [29] and MLP [37] are designed to transform visual tokens into a multi-modal space. However, some works [4, 82] point out that Q-Former may lead to loss of vision information and is difficult to train. Therefore, in SegEarth-R1, we follow the MLP connector fashion in LLaVA [37] and use a simple but effective connector, i.e., stacked convolutional blocks and Layer Normalization (LN). Here, convolutional", + "bbox": [ + 169, + 840, + 825, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "blocks are used for spatial down-sampling to compress the size of the feature map, and LN is used to stabilize cross-modal training. Specifically, our connector can be formulated as:", + "bbox": [ + 169, + 90, + 823, + 119 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nv _ {o u t} = \\left(\\operatorname {C o n v} \\circ L N\\right) ^ {d} \\left(v _ {4}\\right), \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 122, + 825, + 140 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $\\circ$ denotes the function composition operator, and $d$ denotes the number of stacked layers.", + "bbox": [ + 169, + 141, + 797, + 156 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2.2 Text Instruction", + "text_level": 1, + "bbox": [ + 171, + 169, + 336, + 184 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Although the instructions involved in geospatial pixel reasoning are implicit and contain more words than referring segmentation, they still maintain the same data format. Therefore, it is easy to convert them into question-answer pairs using a template like \"USER: This is an image , please doing geospatial pixel reasoning according to the following instruction: . 
ASSISTANT: $ in text instruction.", + "bbox": [ + 189, + 896, + 514, + 911 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Network Architecture. Unless otherwise specified, SegEarth-R1 use phi-1.5 (1.3B) [33] as the LLM, and adopt the Swin-B as the visual encoder. The token compression connector is configured with a layer number $d = 2$ . The mask generator follows the Mask2Former architecture, but removes mask tokens as mentioned above.", + "bbox": [ + 169, + 90, + 823, + 147 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Implementation details. During training, we use bf16 precision and freeze the visual encoder. The LLM is initialized from Phi-1.5, while both the Swin-B encoder and the mask generator are initialized with pretrained weights from Mask2Former. All images are resized to $1024 \\times 1024$ , maintaining the original aspect ratio by padding the shorter side. We adopt the AdamW optimizer with an initial learning rate of $1 \\times 10^{-4}$ , cosine learning rate schedule, and no weight decay. A uniform batch size of 16 is used across datasets, with training steps set to 7,610 (RRSIS-D), 5,400 (RefSegRS), and 2,220 (EarthReason). All experiments are conducted on two NVIDIA A100 80GB GPUs.", + "bbox": [ + 169, + 152, + 826, + 252 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.2 Geospatial Pixel Reasoning Results", + "text_level": 1, + "bbox": [ + 171, + 281, + 457, + 295 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/626e52b1d30a91189d3148a9dff60dcfc5c62d67257be0fb854d0081f77df054.jpg", + "table_caption": [ + "Table 2: Geospatial pixel reasoning results among SegEarth-R1 (ours) and previous related works." + ], + "table_footnote": [], + "table_body": "
Method | Visual Encoder | LLM Type | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test)
LISA [26] | CLIP-L | Vicuna-7B [7] | 57.39 | 59.10 | 61.04 | 60.88
PixelLM [55] | CLIP-L | Vicuna-7B [7] | 57.79 | 59.22 | 57.94 | 60.01
PSALM [92] | Swin-B | phi-1.5 (1.3B) [33] | 62.03 | 64.61 | 66.61 | 68.30
SegEarth-R1 | Swin-B | phi-1.5 (1.3B) [33] | 64.13 | 68.25 | 68.60 | 70.75
", + "bbox": [ + 179, + 349, + 624, + 426 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "LISA and PixelLM demonstrate comparable performance; however, despite leveraging larger LLM or MLLM, the quality of their predicted segmentation masks remains suboptimal. This can be primarily attributed to their reliance on CLIP as the visual encoder, which tends to diminish the representation of small-scale geospatial targets. As one of the baselines of SegEarth-R1, PSALM achieves notable improvements over LISA and PixelLM. Nevertheless, PSALM does not adequately incorporate LLM-based segmentation and the Mask2Former paradigm, and lacks considerations for overhead images. SegEarth-R1 achieves the best results on both metrics surpassing PSALM by $3.64\\%$ and $2.45\\%$ on the test set. Importantly, SegEarth-R1 uses fewer visual tokens in LLM and reduces the number of queries in the mask generator, thus providing a lower inference cost.", + "bbox": [ + 169, + 435, + 823, + 561 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We conduct a comparative evaluation of SOTA LLM-based methods and SegEarth-R1 on the Earth-Reason dataset. As shown in Table 2, all models are trained solely on the training split of EarthReason to ensure a fair comparison.", + "bbox": [ + 640, + 311, + 826, + 435 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3 Referring Segmentation Results", + "text_level": 1, + "bbox": [ + 171, + 590, + 434, + 604 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "SegEarth-R1 also supports basic explicit language-guided segmentation. As shown in Table 3, we compare its performance with existing SOTA traditional methods (not based on LLM) as well as recent LLM-based methods. Notably, prior to SegEarth-R1, LLM-based methods consistently underperformed in comparison to traditional methods on the referring segmentation task. For instance, the advanced GeoGround [95] lags behind RMSIN [40] by $3.7\\%$ in terms of gIoU on the RRSIS-D dataset. In contrast, SegEarth-R1, as a universal LLM-based language-guided segmentation method, surpasses traditional methods on the referring segmentation task for the first time with a $2.2\\%$ improvement. This result highlights the enhanced general", + "bbox": [ + 169, + 621, + 454, + 869 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/0f88521154e49797916ce7004643aedd03775ca6d240ab8fe3dc4e9d7ad29321.jpg", + "table_caption": [ + "Table 3: Referring segmentation results among SegEarth-R1 and previous related works on RRSIS-D dataset." + ], + "table_footnote": [], + "table_body": "
Method | P@0.5 (Val) | P@0.5 (Test) | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test)
Traditional method:
RRN [32] CVPR'18 | 51.09 | 51.07 | 66.53 | 66.43 | 46.06 | 45.64
CMSA [83] CVPR'19 | 55.68 | 55.32 | 69.39 | 69.39 | 48.85 | 48.54
LSCM [22] ECCV'20 | 57.12 | 56.02 | 69.05 | 69.28 | 50.36 | 49.92
CMPC [21] CVPR'20 | 57.93 | 55.83 | 69.22 | 69.39 | 50.41 | 49.24
BRINet [20] CVPR'20 | 58.79 | 56.90 | 70.73 | 69.88 | 51.14 | 49.65
CMPC+ [39] TPAMI'21 | 59.19 | 57.65 | 70.14 | 68.64 | 51.41 | 50.24
LGCE [85] TGRS'24 | 68.10 | 67.65 | 76.68 | 76.34 | 60.16 | 59.37
RIS-DMMI [19] ICCV'23 | 70.40 | 68.74 | 77.01 | 76.20 | 60.72 | 60.12
LAVT [81] CVPR'22 | 69.54 | 69.52 | 77.59 | 77.19 | 61.46 | 61.04
RMSIN [40] CVPR'24 | 74.66 | 74.26 | 78.27 | 77.79 | 65.10 | 64.20
LLM-based method:
LISA [26] CVPR'24 | 27.07 | 24.51 | - | - | 27.84 | 26.78
PixelLM [55] CVPR'24 | 33.46 | 28.81 | - | - | 33.89 | 31.65
NEXT-Chat [87] arXiv'23 | 28.97 | 26.37 | - | - | 26.98 | 24.98
GeoGround [95] arXiv'25 | 68.69 | 67.50 | - | - | 61.10 | 60.50
SegEarth-R1 | 78.62 | 76.96 | 78.92 | 78.01 | 67.56 | 66.40
", + "bbox": [ + 475, + 660, + 815, + 852 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "ization capability and practical potential of SegEarth-R1. On the RefSegRS dataset, the improvement of SegEarth-R1 is more significant than the previous method, with an $8.33\\%$ and $9.87\\%$ improvement over RMSIN on the validation and testing sets, respectively, as listed in Table 4.", + "bbox": [ + 169, + 869, + 823, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/c1fae4041eba706b24a7df9c0e3dce21c67da5799853e8d45371e6e5a8da4d35.jpg", + "table_caption": [ + "Table 4: Referring segmentation results among SegEarth-R1 and previous related works on RefSegRS dataset." + ], + "table_footnote": [], + "table_body": "
Method | P@0.5 (Val) | P@0.5 (Test) | P@0.6 (Val) | P@0.6 (Test) | P@0.7 (Val) | P@0.7 (Test) | P@0.8 (Val) | P@0.8 (Test) | P@0.9 (Val) | P@0.9 (Test) | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test)
BRINet [20] CVPR'20 | 36.86 | 20.72 | 35.53 | 14.26 | 19.93 | 9.87 | 10.66 | 2.98 | 2.84 | 1.14 | 61.59 | 58.22 | 38.73 | 31.51
LSCM [22] ECCV'20 | 56.82 | 31.54 | 41.24 | 20.41 | 21.85 | 9.51 | 12.11 | 5.29 | 2.51 | 0.84 | 62.82 | 61.27 | 40.59 | 35.54
CMPC [21] CVPR'20 | 46.09 | 32.36 | 26.45 | 14.14 | 12.76 | 6.55 | 7.42 | 1.76 | 1.39 | 0.22 | 63.55 | 55.39 | 42.08 | 40.63
CMSA [83] CVPR'19 | 39.24 | 28.07 | 38.44 | 20.25 | 20.39 | 12.71 | 11.79 | 5.61 | 1.52 | 0.83 | 65.84 | 64.53 | 43.62 | 41.47
RRN [32] CVPR'18 | 55.43 | 30.26 | 42.98 | 23.01 | 23.11 | 14.87 | 13.72 | 7.17 | 2.64 | 0.98 | 69.24 | 65.06 | 50.81 | 41.88
EVF-SAM [91] arXiv'24 | 57.77 | 35.17 | 37.59 | 22.34 | 16.24 | 9.36 | 4.87 | 2.86 | 1.86 | 0.39 | 59.61 | 55.51 | 46.98 | 36.64
CMPC+ [39] TPAMI'21 | 56.84 | 49.19 | 37.59 | 28.31 | 20.42 | 15.31 | 10.67 | 8.12 | 2.78 | 0.55 | 70.62 | 66.53 | 47.13 | 43.65
CARIS [41] ACMMM'23 | 68.45 | 45.40 | 47.10 | 27.19 | 25.52 | 15.08 | 14.62 | 8.87 | 3.71 | 1.98 | 75.79 | 69.74 | 54.30 | 42.66
CRIS [69] CVPR'22 | 53.13 | 35.77 | 36.19 | 24.11 | 24.36 | 14.36 | 11.83 | 6.38 | 2.55 | 1.21 | 72.14 | 65.87 | 53.74 | 43.26
LAVT [81] CVPR'22 | 80.97 | 51.84 | 58.70 | 30.27 | 31.09 | 17.34 | 15.55 | 9.52 | 4.64 | 2.09 | 78.50 | 71.86 | 61.53 | 47.40
RIS-DMMI [19] ICCV'23 | 86.17 | 63.89 | 74.71 | 44.30 | 38.05 | 19.81 | 18.10 | 6.49 | 3.25 | 1.00 | 74.02 | 68.58 | 65.72 | 52.15
LGCE [85] TGRS'24 | 90.72 | 73.75 | 86.31 | 61.14 | 71.93 | 39.46 | 32.95 | 16.02 | 10.21 | 5.45 | 83.56 | 76.81 | 72.51 | 59.96
RMSIN [40] CVPR'24 | 93.97 | 79.20 | 89.33 | 65.99 | 74.25 | 42.98 | 29.70 | 16.51 | 7.89 | 3.25 | 82.41 | 75.72 | 73.84 | 62.58
SegEarth-R1 | 95.82 | 86.30 | 93.27 | 79.53 | 88.86 | 69.57 | 78.19 | 48.87 | 22.04 | 10.73 | 85.01 | 79.00 | 82.17 | 72.45
", + "bbox": [ + 174, + 111, + 823, + 268 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/97c77ed6388f2c870fddee56db8381e6bba913581803ca912ab29deb5ea07b5b.jpg", + "image_caption": [ + "Figure 5: Qualitative Results of SegEarth-R1 on EarthReason. More results can be found in Appendix C." + ], + "image_footnote": [ + "USER: In this region, what infrastructure changes could best support an extended growing season for local agricultural crops? ASISTANT: The greenhouse creates a controlled environment that shields plants from harsh weather, harmful insects, and infectious diseases, all of which can threaten the health of the plant." + ], + "bbox": [ + 176, + 295, + 290, + 383 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/bb2e1961765507e81bf0f6b605b6e42bb453f5cff362f055ab7b4b35d26381c2.jpg", + "image_caption": [], + "image_footnote": [ + "USER: Which types of facilities would aid small boat docking in this coastal area, while also contributing positively to tourism and local fisheries? \nASSISTANT: A pier is a man-made construction that extends over the water's surface, acting as a convenient spot for boats to moor while also providing easy access for passengers to get on and off. It serves as a vital hub for the maritime industry, ensuring smooth operations and timely delivery of products." + ], + "bbox": [ + 388, + 296, + 500, + 383 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/64c75a815a92b0c337ccdef9e0c770c2624c1b957678d0c0f48409b52639869c.jpg", + "image_caption": [], + "image_footnote": [ + "USER: What locations facilitate the takeoff and landing of aircraft under supervision from air traffic controllers in a regulated setting? \nASSISTANT: The airport runway is meticulously crafted to facilitate the safe ascent and descent of aircraft, ensuring that every flight begins and ends with precision." + ], + "bbox": [ + 622, + 296, + 735, + 383 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5.4 Ablation Study", + "text_level": 1, + "bbox": [ + 171, + 433, + 318, + 450 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/e874fa801bbf6562c1840a990ffd4cc51dc1bf857b6a6d160c17d709d5e5430e.jpg", + "table_caption": [ + "Table 5: Ablation of SegEarth-R1 components on EarthReason: query description embedding (Query D.E.), description projector $(D$ -Projectile), token compression connector (T.C. Connector)." + ], + "table_footnote": [], + "table_body": "
Query D.E. | D-Projector | T.C. Connector | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test)
✗ | ✗ | ✗ | 62.03 | 64.61 | 66.61 | 68.30
✗ ✗ | 63.34 | 66.19 | 67.42 | 69.15
✗ ✗ | 63.32 | 66.31 | 67.22 | 69.21
✗ ✗ | 63.47 | 65.41 | 68.31 | 69.20
✗ | 64.12 | 66.71 | 68.61 | 69.61
✓ | ✓ | ✓ | 64.13 | 68.25 | 68.60 | 70.75
", + "bbox": [ + 183, + 523, + 524, + 614 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/a22e302b05e8e7f61e8118a42801d303ff0685a00d8e30275a530a2156b8560b.jpg", + "table_caption": [ + "Table 6: Ablation of LLM type on RRSIS-D." + ], + "table_footnote": [], + "table_body": "
LLM Type | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test)
phi-1.5 (1.3B) | 78.92 | 78.01 | 67.56 | 66.40
phi-2 (2B) | 78.98 | 78.35 | 67.91 | 66.67
Qwen2.5 (0.5B) | 78.53 | 77.87 | 67.70 | 66.49
", + "bbox": [ + 563, + 487, + 799, + 546 + ], + "page_idx": 8 + }, + { + "type": "table", + "img_path": "images/7ff79a3cf7322060499f5dd9bfc86b7f0a71adf84720a93f08a6b713b74830aa.jpg", + "table_caption": [ + "Table 7: Ablation of $d$ on EarthReason Val set." + ], + "table_footnote": [], + "table_body": "
d | #Visual Token | gIoU | | d | #Visual Token | gIoU
0 | 1024 | 68.28 | | 2 | 64 | 68.60
1 | 256 | 68.47 | | 3 | 16 | 68.22
", + "bbox": [ + 553, + 574, + 807, + 612 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Components. We conduct ablation studies on the EarthReason dataset to evaluate the effectiveness of the novel components involved in SegEarth-R1. As listed in Table 5, the first row shows the results of the PSALM baseline. Each proposed component contributes to performance enhancement, yielding improvements ranging from $0.85\\%$ to $0.9\\%$ . The T.C. Connector and Query D.E. not only enhances performance but also reduces computational overhead. Further, the proposed components can be well coupled, and when they are all activated, i.e., complete SegEarth-R1, all metrics exhibit substantial gains over the baseline, confirming the effectiveness and compatibility of the proposed design. In fact, although these components are initially designed with remote sensing scenarios in mind, their underlying principles offer transferable insights applicable to general image understanding.", + "bbox": [ + 169, + 621, + 826, + 747 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "LLM Type. Given the limited scale of the dataset, we select some small LLM for comparison, as presented in Table 6. SegEarth-R1 demonstrates consistently high performance across different LLM, indicating the robustness and architectural stability of the overall framework. Notably, with Qwen2.5 (0.5B) [79], it still achieves competitive results, indicating its potential for edge deployment.", + "bbox": [ + 169, + 752, + 826, + 809 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Layer Number of T.C. Connector. The layer number $d$ controls the number of visual tokens fed into the LLM. As shown in Table 7, increasing token quantity does not improve performance. This observation aligns with our earlier analysis, suggesting that appropriate compression of visual tokens is beneficial for the global understanding of a remote sensing image. In SegEarth-R1, spatial correlations between the image and the instruction are primarily handled by the mask generator, while the LLM is only responsible for relatively semantic correlations. This division of labor allows for more efficient use of computational resources without compromising performance.", + "bbox": [ + 169, + 814, + 826, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 491, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 89, + 302, + 106 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In this paper, we introduce geospatial pixel reasoning, a new task in remote sensing that requires models to infer segmentation masks from implicit natural language queries by reasoning over spatial context and domain knowledge. To enable research in this direction, we present EarthReason, the first large-scale benchmark dataset that emphasises complex reasoning scenarios. To address the distinct challenges inherent in remote sensing, we propose SegEarth-R1, a language-guided segmentation model that integrates a hierarchical visual encoder, an LLM for instruction parsing and semantic correlation, and a tailored mask generator designed for spatial correlation. Extensive experiments validate SegEarth-R1's superiority, achieving SOTA performance on both geospatial pixel reasoning and referring segmentation tasks. 
This work pioneers the fusion of natural language reasoning with pixel-level geospatial analysis, offering transformative potential for applications like environmental monitoring and disaster response.", + "bbox": [ + 169, + 119, + 826, + 273 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 291, + 269, + 306 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Seyed Majid Azimi, Corentin Henry, Lars Sommer, Arne Schumann, and Eleonora Vig. Skyscapes fine-grained semantic understanding of aerial scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7393-7403, 2019.", + "[2] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024.", + "[3] Holger Caesar, Jasper Uijlings, and Vittorio Ferrari. Coco-stuff: Thing and stuff classes in context. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1209-1218, 2018.", + "[4] Junbum Cha, Wooyoung Kang, Jonghwan Mun, and Byungseok Roh. Honeybee: Locality-enhanced projector for multimodal llm. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13817-13827, 2024.", + "[5] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1290–1299, 2022.", + "[6] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-pixel classification is not all you need for semantic segmentation. Advances in neural information processing systems, 34:17864-17875, 2021.", + "[7] Wei-Lin Chiang, Zhuohan Li, Ziqing Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with $90\\%$ * chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023.", + "[8] Gordon Christie, Neil Fendley, James Wilson, and Ryan Mukherjee. Functional map of the world. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6172-6180, 2018.", + "[9] Ilke Demir, Krzysztof Koperski, David Lindenbaum, Guan Pang, Jing Huang, Saikat Basu, Forest Hughes, Devis Tuia, and Ramesh Raskar. Deep globe 2018: A challenge to parse the earth through satellite images. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 172-181, 2018.", + "[10] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16321-16330, 2021.", + "[11] Jian Ding, Nan Xue, Gui-Song Xia, Xiang Bai, Wen Yang, Michael Ying Yang, Serge Belongie, Jiebo Luo, Mihai Datcu, Marcello Pelillo, et al. Object detection in aerial images: A large-scale benchmark and challenges. IEEE transactions on pattern analysis and machine intelligence, 44(11):7778-7796, 2021." 
+ ], + "bbox": [ + 173, + 315, + 825, + 910 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 509, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[12] Zhe Dong, Yuzhe Sun, Yanfeng Gu, and Tianzhu Liu. Cross-modal bidirectional interaction model for referring remote sensing image segmentation. arXiv preprint arXiv:2410.08613, 2024.", + "[13] Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes (voc) challenge. International journal of computer vision, 88:303-338, 2010.", + "[14] Rafael C Gonzales and Paul Wintz. Digital image processing. Addison-Wesley Longman Publishing Co., Inc., 1987.", + "[15] Ritwik Gupta, Richard Hosfelt, Sandra Sajeev, Nirav Patel, Bryce Goodman, Jigar Doshi, Eric Heim, Howie Choset, and Matthew Gaston. xbd: A dataset for assessing building damage from satellite imagery. arXiv preprint arXiv:1911.09296, 2019.", + "[16] Junwen He, Yifan Wang, Lijun Wang, Huchuan Lu, Jun-Yan He, Jin-Peng Lan, Bin Luo, and Xuansong Xie. Multi-modal instruction tuned llms with fine-grained visual perception. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 13980-13990, 2024.", + "[17] Ngoc-Vuong Ho, Thinh Phan, Meredith Adkins, Chase Rainwater, Jackson Cothren, and Ngan Le. Rssep: Sequence-to-sequence model for simultaneous referring remote sensing segmentation and detection. In Proceedings of the Asian Conference on Computer Vision, pages 218-231, 2024.", + "[18] Ronghang Hu, Marcus Rohrbach, and Trevor Darrell. Segmentation from natural language expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part I 14, pages 108-124. Springer, 2016.", + "[19] Yutao Hu, Qixiong Wang, Wenqi Shao, Enze Xie, Zhenguo Li, Jungong Han, and Ping Luo. Beyond one-to-one: Rethinking the referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4067-4077, 2023.", + "[20] Zhiwei Hu, Guang Feng, Jiayu Sun, Lihe Zhang, and Huchuan Lu. Bi-directional relationship inferring network for referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4424-4433, 2020.", + "[21] Shaofei Huang, Tianrui Hui, Si Liu, Guanbin Li, Yunchao Wei, Jizhong Han, Luoqi Liu, and Bo Li. Referring image segmentation via cross-modal progressive comprehension. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10488-10497, 2020.", + "[22] Tianrui Hui, Si Liu, Shaofei Huang, Guanbin Li, Sansi Yu, Faxi Zhang, and Jizhong Han. Linguistic structure guided context modeling for referring image segmentation. In European Conference on Computer Vision, pages 59-75. Springer, 2020.", + "[23] Deyi Ji, Feng Zhao, Hongtao Lu, Mingyuan Tao, and Jieping Ye. Ultra-high resolution segmentation with ultra-rich context: A novel benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23621-23630, 2023.", + "[24] Lixia Ji, Yunlong Du, Yiping Dang, Wenzhao Gao, and Han Zhang. A survey of methods for addressing the challenges of referring image segmentation. Neurocomputing, 583:127599, 2024.", + "[25] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. 
In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023.", + "[26] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9579-9589, 2024." + ], + "bbox": [ + 171, + 90, + 828, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Mengcheng Lan, Chaofeng Chen, Yue Zhou, Jiaxing Xu, Yiping Ke, Xinjiang Wang, Litong Feng, and Wayne Zhang. Text4seg: Reimagining image segmentation as text generation. arXiv preprint arXiv:2410.09855, 2024.", + "[28] Sen Lei, Xinyu Xiao, Tianlin Zhang, Heng-Chao Li, Zhenwei Shi, and Qing Zhu. Exploring fine-grained image-text alignment for referring remote sensing image segmentation. IEEE Transactions on Geoscience and Remote Sensing, 2024.", + "[29] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pages 19730–19742. PMLR, 2023.", + "[30] Kaiyu Li, Ruixun Liu, Xiangyong Cao, Xueru Bai, Feng Zhou, Deyu Meng, and Zhi Wang. Seearth-ov: Towards training-free open-vocabulary segmentation for remote sensing images. arXiv preprint arXiv:2410.01768, 2024.", + "[31] Ke Li, Gang Wan, Gong Cheng, Liqui Meng, and Junwei Han. Object detection in optical remote sensing images: A survey and a new benchmark. ISPRS journal of photogrammetry and remote sensing, 159:296-307, 2020.", + "[32] Ruiyu Li, Kaican Li, Yi-Chun Kuo, Michelle Shu, Xiaojuan Qi, Xiaoyong Shen, and Jiaya Jia. Referring image segmentation via recurrent refinement networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5745-5753, 2018.", + "[33] Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar, and Yin Tat Lee. Textbooks are all you need ii: phi-1.5 technical report. arXiv preprint arXiv:2309.05463, 2023.", + "[34] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dólar. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017.", + "[35] Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23592-23601, 2023.", + "[36] Chenxi Liu, Zhe Lin, Xiaohui Shen, Jimei Yang, Xin Lu, and Alan Yuille. Recurrent multimodal interaction for referring image segmentation. In Proceedings of the IEEE international conference on computer vision, pages 1271-1280, 2017.", + "[37] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023.", + "[38] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. Polyformer: Referring image segmentation as sequential polygon generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18653-18663, 2023.", + "[39] Si Liu, Tianrui Hui, Shaofei Huang, Yunchao Wei, Bo Li, and Guanbin Li. Cross-modal progressive comprehension for referring segmentation. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(9):4761-4775, 2021.", + "[40] Sihan Liu, Yiwei Ma, Xiaqing Zhang, Haowei Wang, Jiayi Ji, Xiaoshuai Sun, and Rongrong Ji. Rotated multi-scale interaction network for referring remote sensing image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26658-26668, 2024.", + "[41] Sun-Ao Liu, Yiheng Zhang, Zhaofan Qiu, Hongtao Xie, Yongdong Zhang, and Ting Yao. Caris: Context-aware referring image segmentation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 779-788, 2023.", + "[42] Xu Liu and Zhouhui Lian. Rsunivlm: A unified vision language model for remote sensing via granularity-oriented mixture of experts. arXiv preprint arXiv:2412.05679, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[43] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF international conference on computer vision, pages 10012-10022, 2021.", + "[44] Yang Long, Gui-Song Xia, Shengyang Li, Wen Yang, Michael Ying Yang, Xiao Xiang Zhu, Liangpei Zhang, and Deren Li. On creating benchmark dataset for aerial image interpretation: Reviews, guidances, and million-aid. IEEE Journal of selected topics in applied earth observations and remote sensing, 14:4205–4230, 2021.", + "[45] Siqi Lu, Junlin Guo, James R Zimmer-Dauphinee, Jordan M Nieusma, Xiao Wang, Steven A Wernke, Yuankai Huo, et al. Vision foundation models in remote sensing: A survey. IEEE Geoscience and Remote Sensing Magazine, 2025.", + "[46] Xiaoqiang Lu, Binqiang Wang, Xiangtao Zheng, and Xuelong Li. Exploring models and data for remote sensing image caption generation. IEEE Transactions on Geoscience and Remote Sensing, 56(4):2183-2195, 2017.", + "[47] Edgar Margffoy-Tuay, Juan C Pérez, Emilio Botero, and Pablo Arbeláez. Dynamic multimodal instance segmentation guided by natural language queries. In Proceedings of the European Conference on Computer Vision (ECCV), pages 630–645, 2018.", + "[48] Fausto Miletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 fourth international conference on 3D vision (3DV), pages 565-571. IEEE, 2016.", + "[49] Sayan Nag, Koustava Goswami, and Srikrishna Karanam. Safari: Adaptive sequence tr a ns f ormer for we a kly supervised r eferring expression segmentat i on. In European Conference on Computer Vision, pages 485-503. Springer, 2024.", + "[50] Ruizhe Ou, Yuan Hu, Fan Zhang, Jiaxin Chen, and Yu Liu. Geopix: Multi-modal large language model for pixel-level image understanding in remote sensing. arXiv preprint arXiv:2501.06828, 2025.", + "[51] Chao Pang, Xingxing Weng, Jiang Wu, Jiayu Li, Yi Liu, Jiaxing Sun, Weijia Li, Shuai Wang, Litong Feng, Gui-Song Xia, et al. Vhm: Versatile and honest vision language model for remote sensing image analysis. arXiv preprint arXiv:2403.20213, 2024.", + "[52] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. 
In International conference on machine learning, pages 8748-8763. PmLR, 2021.", + "[53] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13009-13018, 2024.", + "[54] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024.", + "[55] Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26374-26383, 2024.", + "[56] Esther Rolf, Konstantin Klemmer, Caleb Robinson, and Hannah Kerner. Mission critical-satellite data is a distinct modality in machine learning. arXiv preprint arXiv:2402.01444, 2024.", + "[57] Akashah Shabbir, Mohammed Zumri, Mohammed Bennamoun, Fahad S Khan, and Salman Khan. Geopixel: Pixel grounding large multimodal model in remote sensing. arXiv preprint arXiv:2501.13925, 2025." + ], + "bbox": [ + 173, + 90, + 826, + 910 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[58] Chao Shang, Zichen Song, Heqian Qiu, Lanxiao Wang, Fanman Meng, and Hongliang Li. Prompt-driven referring image segmentation with instance contrasting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4124-4134, 2024.", + "[59] Claude E Shannon. A mathematical theory of communication. The Bell system technical journal, 27(3):379-423, 1948.", + "[60] Hengcan Shi, Hongliang Li, Fanman Meng, and Qingbo Wu. Key-word-aware network for referring expression image segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 38-54, 2018.", + "[61] Andreas Steiner, André Susano Pinto, Michael Tschannen, Daniel Keysers, Xiao Wang, Yonatan Bitton, Alexey Gritsenko, Matthias Minderer, Anthony Sherbondy, Shangbang Long, et al. Paligemma 2: A family of versatile vlms for transfer. arXiv preprint arXiv:2412.03555, 2024.", + "[62] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023.", + "[63] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017.", + "[64] Fengxiang Wang, Hongzhen Wang, Mingshuo Chen, Di Wang, Yulin Wang, Zonghao Guo, Qiang Ma, Long Lan, Wenjing Yang, Jing Zhang, et al. Xlrs-bench: Could your multimodal llms understand extremely large ultra-high-resolution remote sensing imagery? arXiv preprint arXiv:2503.23771, 2025.", + "[65] Junchi Wang and Lei Ke. Llm-seg: Bridging image segmentation and large language model reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1765-1774, 2024.", + "[66] Junjue Wang, Zhuo Zheng, Zihang Chen, Ailong Ma, and Yanfei Zhong. Earthvqa: Towards queryable earth via relational reasoning-based remote sensing visual question answering. 
In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 5481-5489, 2024.", + "[67] Junjue Wang, Zhuo Zheng, Ailong Ma, Xiaoyan Lu, and Yanfei Zhong. Loveda: A remote sensing land-cover dataset for domain adaptive semantic segmentation. arXiv preprint arXiv:2110.08733, 2021.", + "[68] Wenhai Wang, Zhe Chen, Xiaokang Chen, Jiannan Wu, Xizhou Zhu, Gang Zeng, Ping Luo, Tong Lu, Jie Zhou, Yu Qiao, et al. Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. Advances in Neural Information Processing Systems, 36:61501-61513, 2023.", + "[69] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11686-11695, 2022.", + "[70] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004.", + "[71] Cong Wei, Yujie Zhong, Haoxian Tan, Yingsen Zeng, Yong Liu, Zheng Zhao, and Yujiu Yang. Instructseg: Unifying instructed visual segmentation with multi-modal large language models. arXiv preprint arXiv:2412.14006, 2024.", + "[72] Jiannan Wu, Yi Jiang, Peize Sun, Zehuan Yuan, and Ping Luo. Language as queries for referring video object segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4974-4984, 2022.", + "[73] Jiannan Wu, Muyan Zhong, Sen Xing, Zeqiang Lai, Zhaoyang Liu, Zhe Chen, Wenhai Wang, Xizhou Zhu, Lewei Lu, Tong Lu, et al. Visionlm v2: An end-to-end generalist multimodal large language model for hundreds of vision-language tasks. Advances in Neural Information Processing Systems, 37:69925-69975, 2025." + ], + "bbox": [ + 173, + 90, + 826, + 911 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[74] Jianzong Wu, Xiangtai Li, Xia Li, Henghui Ding, Yunhai Tong, and Dacheng Tao. Towards robust referring image segmentation. IEEE Transactions on Image Processing, 2024.", + "[75] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3858-3869, 2024.", + "[76] Bin Xiao, Haiping Wu, Weijian Xu, Xiyang Dai, Houdong Hu, Yumao Lu, Michael Zeng, Ce Liu, and Lu Yuan. Florence-2: Advancing a unified representation for a variety of vision tasks (2023). URL https://arxiv.org/abs/2311.06242, 2023.", + "[77] Zunnan Xu, Zhihong Chen, Yong Zhang, Yibing Song, Xiang Wan, and Guanbin Li. Bridging vision and language encoders: Parameter-efficient tuning for referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17503-17512, 2023.", + "[78] Cilin Yan, Haochen Wang, Shilin Yan, Xiaolong Jiang, Yao Hu, Guoliang Kang, Weidi Xie, and Efstratios Gavves. Visa: Reasoning video object segmentation via large language models. In European Conference on Computer Vision, pages 98-115. Springer, 2024.", + "[79] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. 
arXiv preprint arXiv:2412.15115, 2024.", + "[80] Senqiao Yang, Tianyuan Qu, Xin Lai, Zhuotao Tian, Bohao Peng, Shu Liu, and Jiaya Jia. Lisa++: An improved baseline for reasoning segmentation with large language model. arXiv preprint arXiv:2312.17240, 2023.", + "[81] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18155–18165, 2022.", + "[82] Linli Yao, Lei Li, Shuhuai Ren, Lean Wang, Yuanxin Liu, Xu Sun, and Lu Hou. Deco: Decoupling token compression from semantic abstraction in multimodal large language models. arXiv preprint arXiv:2405.20985, 2024.", + "[83] Linwei Ye, Mrigank Rochan, Zhi Liu, and Yang Wang. Cross-modal self-attention network for referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10502–10511, 2019.", + "[84] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv preprint arXiv:2501.04001, 2025.", + "[85] Zhenghang Yuan, Lichao Mou, Yuansheng Hua, and Xiao Xiang Zhu. Rrsis: Referring remote sensing image segmentation. IEEE Transactions on Geoscience and Remote Sensing, 2024.", + "[86] Yang Zhan, Zhitong Xiong, and Yuan Yuan. Rsvg: Exploring data and models for visual grounding on remote sensing data. IEEE Transactions on Geoscience and Remote Sensing, 61:1-13, 2023.", + "[87] Ao Zhang, Yuan Yao, Wei Ji, Zhiyuan Liu, and Tat-Seng Chua. Next-chat: An Imm for chat, detection and segmentation. arXiv preprint arXiv:2311.04498, 2023.", + "[88] Ao Zhang, Yuan Yao, Wei Ji, Zhiyuan Liu, and Tat-Seng Chua. Next-chat: An Imm for chat, detection and segmentation. In International Conference on Machine Learning, pages 60116-60133. PMLR, 2024.", + "[89] Tao Zhang, Xiangtai Li, Hao Fei, Haobo Yuan, Shengqiong Wu, Shunping Ji, Chen Change Loy, and Shuicheng Yan. Omg-llava: Bridging image-level, object-level, pixel-level reasoning and understanding. Advances in Neural Information Processing Systems, 37:71737-71767, 2025.", + "[90] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 14227-14238, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 912 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[91] Yuxuan Zhang, Tianheng Cheng, Rui Hu, Lei Liu, Heng Liu, Longjin Ran, Xiaoxin Chen, Wenyu Liu, and Xinggang Wang. Evf-sam: Early vision-language fusion for text-prompted segment anything model. arXiv preprint arXiv:2406.20076, 2024.", + "[92] Zheng Zhang, Yeyao Ma, Enming Zhang, and Xiang Bai. Psalm: Pixelwise segmentation with large multi-modal model. In European Conference on Computer Vision, pages 74-91. Springer, 2024.", + "[93] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. 
In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017.", + "[94] Li Zhou, Xu Yuan, Zenghui Sun, Zikun Zhou, and Jingsong Lan. Instruction-guided multi-granularity segmentation and captioning with large multimodal model. arXiv preprint arXiv:2409.13407, 2024.", + "[95] Yue Zhou, Mengcheng Lan, Xiang Li, Yiping Ke, Xue Jiang, Litong Feng, and Wayne Zhang. Geoground: A unified large vision-language model. for remote sensing visual grounding. arXiv preprint arXiv:2411.11904, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 339 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "A Data", + "text_level": 1, + "bbox": [ + 171, + 90, + 250, + 104 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.1 Annotation of EarthReason", + "text_level": 1, + "bbox": [ + 171, + 122, + 408, + 136 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Each sample of the EarthReason benchmark consists of an image, a corresponding mask, and six reasoning queries along with their respective answers. Given that our metadata is derived from classification datasets, we employed GPT-4o and GPT-3.5 to generate textual annotations, and invited multiple remote sensing and vision experts to provide accurate and reliable mask annotations. Overall, our annotation process consists of the following three steps:", + "bbox": [ + 169, + 148, + 826, + 219 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Step-1: To fully leverage the powerful multimodal capabilities and extensive geographic knowledge of GPT-4o, we carefully design the prompt, which is then provided alongside images and their corresponding category labels to generate a reasoning question-answer pair. The prompt is illustrated in Figure 6.", + "- Step-2: To avoid homogeneous question-answer formats under a single prompt, we further employ the textual capabilities of GPT-3.5 to expand each generated question into six variations and each answer into three alternatives. The prompt used for this expansion is shown in Figure 7.", + "- Step-3: Unlike previous methods that rely on semi-automatic mask annotation based on off-the-shelf bounding boxes or masks, we invite multiple remote sensing vision experts to perform accurate and efficient mask annotation guided by the generated questions. To further improve annotation efficiency, we incorporate SAM-H as an auxiliary tool for some simple targets. Subsequently, we perform cross-validation of the annotation results and re-associate the samples that do not meet the quality standards. As shown in Figure 8, (a), (b), and (c), derived from the RRSIS-D dataset, illustrate the masks of semi-automatic annotation based on bounding boxes. (a) and (c) exhibit noticeable annotation errors, while in (b), the query does not align with the annotation. (d), (e), and (f) illustrate our high-quality manual annotations." + ], + "bbox": [ + 181, + 232, + 826, + 469 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Prompt: You are an expert in geographic remote sensing imagery. Please fully analyze the geographical landscape and cultural features in remote sensing images. Generate an implicit reasoning questions based on given object categories. Please use your imagination and feel free to change the sentence structure or add a situation description. 
Just give the implicit reasoning questions that meet the requirements. The descriptions must refer to the natural landscapes and cultural landscapes shown in remote sensing images. The output implicit reasoning questions need to meet the following requirements:", + "bbox": [ + 179, + 502, + 624, + 558 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "(1) Please imagine the scene and output an implicit reasoning question to describe the attributes or functions of the given object. The output question must have a certain degree of reasoning difficulty and be helpful to humans.", + "bbox": [ + 179, + 558, + 624, + 584 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "(2) Do not explicitly write the name or description of the target object in the original text. Questions should be asked in the form of asking where, which infrastructure, how to do a certain activity, which location, what object.", + "bbox": [ + 181, + 585, + 624, + 612 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "(3) The output cannot be redundant. Just give one question and its answer that you think has the highest confidence. The question should be at least 18 words. The answer requires giving the name of the given object and then explaining why the answer is this, about 20 words.", + "bbox": [ + 181, + 613, + 624, + 640 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "(4) Do not output any redundant information except the question and the answer, and separate them with line break.", + "bbox": [ + 181, + 640, + 624, + 657 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The given object category is . ", + "bbox": [ + 181, + 657, + 433, + 667 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/8a7c1ee7e7df958a3036a633142562202525c72efe5cb6c495616c48f73e2c0d.jpg", + "image_caption": [ + "Figure 6: The illustration of the prompt construction process for generating question-answer pairs for geospatial pixel reasoning." + ], + "image_footnote": [], + "bbox": [ + 181, + 670, + 228, + 686 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/cee5a1dcf359a7e6af58bb4d2044cd327dc5dbd4b06e241ee9bdfc95b7d4e653.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 640, + 511, + 805, + 633 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Question: If an earthquake were to occur, what is the most secure area to assemble numerous individuals that also facilitates emergency service access?", + "bbox": [ + 179, + 689, + 785, + 700 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Answer: The ground track field stands out as the most secure location, thanks to its vast open area that minimizes the chance of hazards, as well as its close accessibility to primary roads for swift emergency response.", + "bbox": [ + 179, + 703, + 808, + 723 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A.2 EarthReason Statistics", + "text_level": 1, + "bbox": [ + 171, + 801, + 375, + 815 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "The EarthReason benchmark comprises 28 categories, and the number of samples in each category is shown in Figure 9 (a). It can be observed that the distribution of the 28 categories is relatively balanced. Figure 9 (b), (c), and (d) illustrate the category distributions in the training, validation, and test sets, respectively. 
To evaluate the model's generalization capability, we specifically excluded four categories—\"basketball court\", \"island\", \"lake\", and \"stadium\"—from the training set. Moreover, we introduced 119 empty target samples to mitigate potential hallucinations of the model.", + "bbox": [ + 169, + 828, + 823, + 912 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Prompt: Imagine you are a remote sensing expert with extensive knowledge in geography. I will give you a question or description related to remote sensing, and you will expand it into 6 related questions or topics. The given question is \"If an earthquake were to occur, what is the most secure area to assemble numerous individuals that also facilitates emergency service access?\"", + "bbox": [ + 179, + 95, + 813, + 126 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/8e0c8b664ffb3fc42eff7302e7b220fe9d53dbda5e1ae4a06ffd7deb61a68e29.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 181, + 131, + 225, + 145 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Response: 1. In case of an earthquake, where can individuals find a clear and accessible area nearby to assemble safely? 2. During an earthquake, what spacious and unobstructed area close by would provide the safest environment for all of us to meet? 3. Should an earthquake happen, where in the vicinity can people find an open area to come together safely and securely? 4. In the scenario of an earthquake, where can residents find a secure area with open space and accessible paths for evacuation? 5. In an earthquake situation, which nearby area featuring open space and clear pathways would be the safest choice for resident evacuation? 6. If an earthquake occurs, which types of infrastructure are anticipated to function as dependable gathering spots with adequate open areas?", + "bbox": [ + 179, + 151, + 794, + 209 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Prompt: Imagine you are a remote sensing expert with extensive knowledge in geography. I will give you a question or description related to remote sensing, and you will expand it into 3 related questions or topics. The given description is \"The ground track field stands out as the most secure location, thanks to its vast open area that minimizes the chance of hazards, as well as its close accessibility to primary roads for swift emergency response.\"", + "bbox": [ + 181, + 224, + 808, + 253 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/cd5f50160afb79ae1a63f02722faf7aa11d19addd646282b4ecb2b9b461fe764.jpg", + "image_caption": [ + "Figure 7: The illustration of the prompt construction process for expand question-answer pairs." + ], + "image_footnote": [], + "bbox": [ + 181, + 263, + 228, + 279 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Response: 1. With minimal obstructions and easy access to main roads, the ground track field offers both safety and convenience, positioning it as the most reliable site in terms of hazard prevention and emergency preparedness. 2. The expansive open space of the track field provides a secure environment, greatly minimizing the chance of falling objects. This makes it an ideal refuge from any potential dangers. 3. 
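The held-out-category split described here can be reproduced with a few lines of code; the category names come from the text above, while the record structure (a "category" key and an "is_empty_target" flag) is an illustrative assumption, not the released annotation format.

```python
# Categories excluded from training to test cross-category generalization.
HELD_OUT = {"basketball court", "island", "lake", "stadium"}

def split_for_generalization(samples):
    # `samples` is assumed to be a list of dicts with a "category" key and an
    # "is_empty_target" flag marking the negative (no-target) images.
    train = [s for s in samples if s["category"] not in HELD_OUT]
    held_out = [s for s in samples if s["category"] in HELD_OUT]
    empty = [s for s in samples if s.get("is_empty_target", False)]
    return train, held_out, empty
```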
With its generous outdoor space devoid of any encroaching structures, the ground track field emerges as the premier choice, ensuring an environment where safety is prioritized.", + "bbox": [ + 179, + 285, + 799, + 325 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/63be9c1fedf7ab7e4bf057f8eb6308a8f4f8322d71c7572a0c9a3f3ab0b40550.jpg", + "image_caption": [ + "(a) query: The gray bridge" + ], + "image_footnote": [], + "bbox": [ + 187, + 417, + 361, + 523 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/6a2f7de8ce8298676a498830ee0e324c128682c6a58db686b1b18de31119fa5d.jpg", + "image_caption": [ + "Semi-automatic Annotation", + "(b) query: The blue storage tank" + ], + "image_footnote": [], + "bbox": [ + 408, + 417, + 586, + 523 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/3b338ec1416efc1d1a00774bc71968ec2a296ede5a3c9b43f9c12d3894da5ecf.jpg", + "image_caption": [ + "(c) query: The gray bridge in the middle" + ], + "image_footnote": [], + "bbox": [ + 627, + 417, + 805, + 522 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/64f1d1067264538ec56ef833f567f8155031c3511a2b8380a674387b0ab0e6c6.jpg", + "image_caption": [ + "(d)", + "Figure 8: Comparison of annotation quality. (a), (b) and (c) are from RRSIS-D dataset, (d), (e) and (f) are from our EarthReason dataset." + ], + "image_footnote": [], + "bbox": [ + 187, + 555, + 361, + 657 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/b7830596c8beefda3b7adb6e3ec690a0d8f9c23de9b699a5c3e8d7cde4218317.jpg", + "image_caption": [ + "Manual Annotation", + "(e)" + ], + "image_footnote": [], + "bbox": [ + 406, + 556, + 586, + 657 + ], + "page_idx": 17 + }, + { + "type": "image", + "img_path": "images/ad4b65aba8ac73b4d3e2123a3360c28d305d5b56bd775957594a0e5f110a7009.jpg", + "image_caption": [ + "(f)" + ], + "image_footnote": [], + "bbox": [ + 627, + 555, + 807, + 657 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B Additional Implementation Details", + "text_level": 1, + "bbox": [ + 171, + 763, + 500, + 781 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "B.1 Details of Training Hyper-parameters", + "text_level": 1, + "bbox": [ + 171, + 815, + 478, + 830 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Table 8 presents the hyper-parameter settings used during the training of our model. For training on the referring segmentation datasets, we employ only focal loss and dice loss to supervise mask generation. 
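The two GPT-based annotation steps illustrated above (question-answer generation with GPT-4o, then paraphrase expansion with GPT-3.5) could be scripted roughly as follows, assuming the OpenAI Python SDK. The model names match the text, but the prompt handling, response parsing, and function names are placeholders for the prompts shown in Figures 6 and 7, not the authors' actual tooling.

```python
import base64
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def generate_qa(image_path, category, generation_prompt):
    # Step 1: one implicit reasoning question-answer pair per image (GPT-4o).
    with open(image_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    resp = client.chat.completions.create(
        model="gpt-4o",
        messages=[{
            "role": "user",
            "content": [
                {"type": "text",
                 "text": f"{generation_prompt}\nThe given object category is {category}."},
                {"type": "image_url",
                 "image_url": {"url": f"data:image/jpeg;base64,{b64}"}},
            ],
        }],
    )
    # The prompt asks for question and answer separated by a line break.
    question, answer = resp.choices[0].message.content.split("\n", 1)
    return question.strip(), answer.strip()

def expand(text, n, expansion_prompt):
    # Step 2: paraphrase a question (or answer) into n variations (GPT-3.5).
    resp = client.chat.completions.create(
        model="gpt-3.5-turbo",
        messages=[{"role": "user", "content": expansion_prompt.format(n=n, text=text)}],
    )
    return resp.choices[0].message.content
```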
In contrast, for training on the geospatial pixel reasoning task, we additionally incorporate the cross-entropy loss from the large language model to supervise text answer generation.",
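For reference, the loss composition described here (focal and dice losses for mask supervision, plus the LLM's token-level cross-entropy on geospatial pixel reasoning data) could be sketched as below. The loss weights and function names are illustrative assumptions, not the paper's exact settings.

```python
import torch
import torch.nn.functional as F

def focal_loss(logits, targets, alpha=0.25, gamma=2.0):
    # Sigmoid focal loss over per-pixel mask logits (targets are 0/1 masks).
    prob = logits.sigmoid()
    ce = F.binary_cross_entropy_with_logits(logits, targets, reduction="none")
    p_t = prob * targets + (1 - prob) * (1 - targets)
    loss = ce * ((1 - p_t) ** gamma)
    loss = (alpha * targets + (1 - alpha) * (1 - targets)) * loss
    return loss.mean()

def dice_loss(logits, targets, eps=1.0):
    # Soft Dice loss; flattens spatial dimensions per sample.
    prob = logits.sigmoid().flatten(1)
    targets = targets.flatten(1)
    inter = (prob * targets).sum(-1)
    union = prob.sum(-1) + targets.sum(-1)
    return (1 - (2 * inter + eps) / (union + eps)).mean()

def total_loss(mask_logits, gt_masks, lm_logits=None, lm_labels=None,
               w_focal=1.0, w_dice=1.0, w_text=1.0):
    # Referring segmentation batches: mask supervision only.
    loss = w_focal * focal_loss(mask_logits, gt_masks) + w_dice * dice_loss(mask_logits, gt_masks)
    # Geospatial pixel reasoning batches: add the LLM's next-token cross-entropy.
    if lm_logits is not None and lm_labels is not None:
        loss = loss + w_text * F.cross_entropy(
            lm_logits.view(-1, lm_logits.size(-1)), lm_labels.view(-1), ignore_index=-100)
    return loss
```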
Parameters | Value
Optimizer | AdamW
Learning Rate | 1 × 10^-4
Batch Size | 16
Number of Iterations | 7,610 / 5,400 / 2,220
Learning Rate Schedule | Cosine Decay
Weight Decay | 0.0
Warmup Ratio | 0.03
β1 | 0.9
β2 | 0.999
Image Size | 1024 × 1024
Image Processing | Resize long edge to 1024 and pad short edge to 1024.
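The rows above translate almost directly into code. A minimal sketch follows; the resize-and-pad transform mirrors the "Image Processing" row, and the optimizer and schedule follow the listed values (AdamW, learning rate 1e-4, betas 0.9/0.999, no weight decay, cosine decay with a 0.03 warmup ratio). The linear-warmup implementation and function names are assumptions.

```python
import math
import torch
import torch.nn.functional as F

def resize_long_edge_and_pad(img, size=1024):
    # img: (C, H, W) float tensor. Resize so the long edge equals `size`,
    # then zero-pad the short edge to obtain a square `size` x `size` input.
    c, h, w = img.shape
    scale = size / max(h, w)
    new_h, new_w = int(round(h * scale)), int(round(w * scale))
    img = F.interpolate(img[None], size=(new_h, new_w),
                        mode="bilinear", align_corners=False)[0]
    return F.pad(img, (0, size - new_w, 0, size - new_h))  # pad right/bottom

def build_optimizer_and_scheduler(params, total_iters, lr=1e-4, warmup_ratio=0.03):
    optimizer = torch.optim.AdamW(params, lr=lr, betas=(0.9, 0.999), weight_decay=0.0)
    warmup_iters = int(warmup_ratio * total_iters)

    def lr_lambda(step):
        if step < warmup_iters:  # linear warmup
            return step / max(1, warmup_iters)
        progress = (step - warmup_iters) / max(1, total_iters - warmup_iters)
        return 0.5 * (1.0 + math.cos(math.pi * progress))  # cosine decay

    scheduler = torch.optim.lr_scheduler.LambdaLR(optimizer, lr_lambda)
    return optimizer, scheduler

# Example iteration counts from the table: 7,610 (RRSIS-D), 5,400 (RefSegRS), 2,220 (EarthReason).
```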
", + "bbox": [ + 295, + 489, + 702, + 686 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C Examples", + "text_level": 1, + "bbox": [ + 171, + 714, + 290, + 732 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C.1 More Qualitative Results on EarthReason", + "text_level": 1, + "bbox": [ + 171, + 750, + 508, + 763 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Figure 10 presents a comparison between SegEarth-R1 and other models on the EarthReason dataset. It can be observed that our model demonstrates a better understanding of long reasoning instructions and produces more accurate mask generation.", + "bbox": [ + 169, + 777, + 826, + 819 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "C.2 More Qualitative Results on RRSIS-D", + "text_level": 1, + "bbox": [ + 171, + 840, + 480, + 856 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "Figure 11 presents a comparison between SegEarth-R1 and PSALM on the RRSIS-D dataset. Our model demonstrates a better understanding of direct geographical attributes such as location, color, and size compared to PSALM. This improvement is attributed to the removal of indirect mask", + "bbox": [ + 169, + 869, + 826, + 912 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "prediction using mask tokens, allowing semantic information (description embeddings) to directly interact with image features to generate masks.", + "bbox": [ + 171, + 90, + 823, + 119 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/3a63b8b221e5c564bb5f8ad96a28a06fe20f6e53597410510034ebcbad7c45e6.jpg", + "image_caption": [ + "Figure 10: Comparison with other models on EarthReason." + ], + "image_footnote": [], + "bbox": [ + 176, + 141, + 820, + 720 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 19 + }, + { + "type": "image", + "img_path": "images/c42b96580a354e24b7c339f8f548a71f74e5ed98862c7c6e1c9f21bcca65b3bc.jpg", + "image_caption": [ + "Figure 11: Comparison with PSALM on RRSIS-D." 
+ ], + "image_footnote": [], + "bbox": [ + 186, + 125, + 818, + 854 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_model.json b/data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_model.json new file mode 100644 index 0000000000000000000000000000000000000000..81b7e8e44061b1e4912db8baf550ac765b16c211 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_model.json @@ -0,0 +1,3443 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.263, + 0.061, + 0.709 + ], + "angle": 270, + "content": "arXiv:2504.09644v1 [cs.CV] 13 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.185, + 0.123, + 0.817, + 0.175 + ], + "angle": 0, + "content": "SegEarth-R1: Geospatial Pixel Reasoning via Large Language Model" + }, + { + "type": "text", + "bbox": [ + 0.214, + 0.208, + 0.784, + 0.254 + ], + "angle": 0, + "content": "Kaiyu Li\\(^{1,\\ast}\\), Zepeng Xin\\(^{1,\\ast}\\), Li Pang\\(^{1}\\), Chao Pang\\(^{2}\\), Yupeng Deng\\(^{3}\\), Jing Yao\\(^{3}\\), Guisong Xia\\(^{2}\\), Deyu Meng\\(^{1}\\), Zhi Wang\\(^{1}\\), Xiangyong Cao\\(^{1,\\dagger}\\), Xi'an Jiaotong University" + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.273, + 0.538, + 0.289 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.303, + 0.77, + 0.594 + ], + "angle": 0, + "content": "Remote sensing has become critical for understanding environmental dynamics, urban planning, and disaster management. However, traditional remote sensing workflows often rely on explicit segmentation or detection methods, which struggle to handle complex, implicit queries that require reasoning over spatial context, domain knowledge, and implicit user intent. Motivated by this, we introduce a new task, i.e., geospatial pixel reasoning, which allows implicit querying and reasoning and generates the mask of the target region. To advance this task, we construct and release the first large-scale benchmark dataset called EarthReason, which comprises 5,434 manually annotated image masks with over 30,000 implicit question-answer pairs. Moreover, we propose SegEarth-R1, a simple yet effective language-guided segmentation baseline that integrates a hierarchical visual encoder, a large language model (LLM) for instruction parsing, and a tailored mask generator for spatial correlation. The design of SegEarth-R1 incorporates domain-specific adaptations, including aggressive visual token compression to handle ultra-high-resolution remote sensing images, a description projection module to fuse language and multi-scale features, and a streamlined mask prediction pipeline that directly queries description embeddings. Extensive experiments demonstrate that SegEarth-R1 achieves state-of-the-art performance on both reasoning and referring segmentation tasks, significantly outperforming traditional and LLM-based segmentation methods. Our data and code will be released at https://github.com/earth-insights/SegEarth-R1." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.622, + 0.314, + 0.637 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.652, + 0.828, + 0.833 + ], + "angle": 0, + "content": "Earth observation through remote sensing has emerged as a cornerstone of modern geospatial analysis, enabling unprecedented insights into environmental dynamics, urban planning, and disaster management [56, 45]. Satellite and aerial images provide a unique vantage point for monitoring planetary-scale phenomena, ranging from deforestation patterns to coastal erosion. However, converting this raw pixel data into actionable insights requires more than traditional computer vision techniques; it demands models capable of reasoning about spatial context, domain knowledge, and implicit user intent. Conventional remote sensing workflows predominantly rely on explicit tasks, e.g., semantic segmentation and referring segmentation [44, 8, 85], which operate within fixed taxonomies and require precise user instructions. While effective for well-defined scenarios, these approaches struggle to accommodate complex, implicit queries—for example, identifying regions at elevated risk of landslides based on slope, vegetation cover, and proximity to infrastructure. Such tasks limit implicit reasoning over heterogeneous spatial patterns, object relationships, and environmental metadata, exceeding the capabilities of standard segmentation or detection pipelines." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.838, + 0.828, + 0.868 + ], + "angle": 0, + "content": "Motivated by this, we introduce a new task, i.e., geospatial pixel reasoning, which allows implicit querying and reasoning and generates the mask of the target region. To enable research in this task," + }, + { + "type": "page_footnote", + "bbox": [ + 0.191, + 0.875, + 0.316, + 0.888 + ], + "angle": 0, + "content": "*Equal contribution" + }, + { + "type": "page_footnote", + "bbox": [ + 0.193, + 0.89, + 0.532, + 0.904 + ], + "angle": 0, + "content": "†Corresponding author: caoxiangyong@mail.xjtu.edu.cn" + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.316, + 0.937 + ], + "angle": 0, + "content": "Preprint. Under review." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.095, + 0.321, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.214, + 0.322, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.195, + 0.33, + 0.305, + 0.337 + ], + "angle": 0, + "content": "A tennis court on the far left" + }, + { + "type": "image_caption", + "bbox": [ + 0.488, + 0.097, + 0.671, + 0.111 + ], + "angle": 0, + "content": "Geospatial Pixel Reasoning" + }, + { + "type": "image", + "bbox": [ + 0.338, + 0.113, + 0.815, + 0.295 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.342, + 0.299, + 0.508, + 0.335 + ], + "angle": 0, + "content": "USER: When there is an urgent necessity for emergency medical services, what designated location facilitates rapid air evacuations for patients?" + }, + { + "type": "image_footnote", + "bbox": [ + 0.545, + 0.299, + 0.807, + 0.335 + ], + "angle": 0, + "content": "ASSISTANT: Perched atop the medical complex, the airport helipad facilitates swift helicopter evacuations for patients in critical condition. This strategic location offers immediate access to vital healthcare services, ensuring timely assistance in emergencies." 
+ }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.35, + 0.825, + 0.401 + ], + "angle": 0, + "content": "Figure 1: Comparison of semantic segmentation, referring segmentation and geospatial pixel inference. (left) Samples from the LoveDA [67] and RRSIS-D [40] datasets. (right) Samples from the EarthReason dataset. Previous tasks are limited by fixed taxonomies and explicit instructions, while geospatial pixel reasoning supports complex implicit instructions and requires the reasoning capability of the model." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.422, + 0.825, + 0.52 + ], + "angle": 0, + "content": "we build and release the first large-scale benchmark dataset, called EarthReason, which contains 5,434 manually annotated remote sensing image-mask pairs drawn from diverse classification sources, covering 28 scene categories at spatial resolutions ranging from \\(0.5\\mathrm{m}\\) to \\(153\\mathrm{m}\\). Each image is paired with multiple implicit reasoning questions that require the model to infer target masks based on contextual and domain-specific knowledge, rather than explicit object names. In addition, by incorporating empty target cases and varying spatial scales, EarthReason pushes models to generalize across complex real-world remote sensing scenarios." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.526, + 0.825, + 0.678 + ], + "angle": 0, + "content": "Recent progress in multimodal large language models (MLLMs) has demonstrated impressive performance in natural image domains, where models like LISA [26] and PixelLM [55] leverage large language models (LLMs) [62, 7, 79] to interpret rich textual prompts and generate pixel-level outputs. These frameworks excel at tasks such as reasoning segmentation [26], where the target mask is not directly specified but must be inferred from nuanced language cues. Unfortunately, directly transferring these methods to geospatial pixel reasoning is non-trivial since remote sensing images present extreme scale variation, densely packed small-scale objects and ultra-high resolution that violate assumptions of natural images. Moreover, different from natural images, remote sensing queries often require spatial correlations. For instance, identifying \"informal settlements\" relies on detecting roof material irregularities, road network fragmentation, and spatial adjacency to legal land-use zones." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.684, + 0.825, + 0.809 + ], + "angle": 0, + "content": "To address these challenges, we present SegEarth-R1, a simple yet effective language-guided segmentation model that integrates a hierarchical visual encoder, an LLM for instruction parsing, and a tailored mask generator designed for spatial correlation. Further, some components are also designed to adapt to the characteristics of remote sensing images. Specifically, we propose the aggressive visual token compression to handle ultra-high-resolution images, a description projection module to fuse language and multi-scale features, and a streamlined mask prediction pipeline that directly queries description embeddings. Despite its architectural simplicity, SegEarth-R1 achieves advanced performance on EarthReason and referring segmentation datasets, significantly outperforming both traditional and LLM-based segmentation methods." 
+ }, + { + "type": "text", + "bbox": [ + 0.172, + 0.816, + 0.473, + 0.83 + ], + "angle": 0, + "content": "In summary, our contributions are as follows:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.845, + 0.825, + 0.873 + ], + "angle": 0, + "content": "- We introduce the geospatial pixel reasoning task, which requires models to infer segmentation masks from implicit natural language queries by reasoning over spatial context and domain knowledge." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.884, + 0.825, + 0.913 + ], + "angle": 0, + "content": "- We build and release the first large-scale benchmark with 5,434 image-mask pairs, 28 categories, and over 30,000 implicit question-answer pairs, fostering research in geospatial pixel reasoning." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.845, + 0.825, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.12 + ], + "angle": 0, + "content": "- We propose an LLM-based segmentation model, SegEarth-R1, which incorporates new segmentation capabilities in remote sensing, containing several domain-specific designs." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.138, + 0.825, + 0.166 + ], + "angle": 0, + "content": "- Extensive experiments show that SegEarth-R1 achieves state-of-the-art performance on reasoning and referring segmentation tasks, compared to traditional methods and other LLM-based methods." + }, + { + "type": "list", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.166 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.2, + 0.323, + 0.215 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.239, + 0.382, + 0.254 + ], + "angle": 0, + "content": "2.1 Referring Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.27, + 0.828, + 0.548 + ], + "angle": 0, + "content": "Referring segmentation aims to segment targets in an image based on natural language descriptions, requiring precise alignment between linguistic expressions and visual content. Early approaches adopted CNN-RNN/LSTM frameworks [18, 36, 32, 47, 60, 21] to extract visual features and encode textual queries, respectively. However, these methods struggled with complex expressions due to limited local receptive fields and insufficient cross-modal interaction [24]. To address these limitations, attention mechanisms [63] emerged as a pivotal technique [10, 81, 72, 19, 77, 49, 74, 58]. VLT [10] dynamically generates adaptive query vectors based on image-text interactions, enabling precise localization through cross-modal attention. LAVT [81] further advances this paradigm by integrating hierarchical visual-linguistic fusion within a Swin Transformer [43] backbone, where pixel-word attention refines multiscale features to achieve fine-grained semantic alignment. In remote sensing, specifying segmentation for certain instances can improve interpretation efficiency and user interactivity. Recently, Yuan et al. [85] introduced referring segmentation into satellite images for the first time. Subsequently, following the LAVT [81] architecture, RMSIN [40] also incorporated adaptive rotated convolutions to address scale and orientation variations. FIANet [28] and CroBIM [12] introduced elaborate cross-modal interactions for feature alignment. 
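As a structural sketch only (all module names and interfaces are assumptions, not the released implementation), the three-part design described above might be wired together as follows.

```python
import torch.nn as nn

class SegEarthR1(nn.Module):
    """Structural sketch: hierarchical visual encoder, LLM, and mask generator."""

    def __init__(self, visual_encoder, connector, llm, mask_generator):
        super().__init__()
        self.visual_encoder = visual_encoder  # hierarchical backbone -> multi-scale features
        self.connector = connector            # aggressive visual token compression
        self.llm = llm                        # instruction parsing / semantic correlation
        self.mask_generator = mask_generator  # spatial correlation + mask prediction

    def forward(self, image, text_tokens):
        feats = self.visual_encoder(image)              # v1..v4 at 1/4 .. 1/32 resolution
        vis_tokens = self.connector(feats[-1])          # compressed tokens fed to the LLM
        desc_embed = self.llm(vis_tokens, text_tokens)  # description embeddings (assumed output)
        return self.mask_generator(feats, desc_embed)   # queries multi-scale features for the mask
```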
RSSep [17] reformulated referring segmentation as a sequence-to-sequence task, predicting polygonal boundaries to handle scale variations and blurred edges [38]. However, existing methods effectively follow explicit instructions for target segmentation but lack implicit intent reasoning. In this paper, the proposed geospatial pixel reasoning task advances beyond referring segmentation by employing LLMs' reasoning capabilities to interpret subtle instructions and accurately segment desired targets." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.577, + 0.395, + 0.592 + ], + "angle": 0, + "content": "2.2 LLM-based Segmentation" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.608, + 0.828, + 0.913 + ], + "angle": 0, + "content": "Recent advances in LLMs have significantly expanded their capabilities to integrate pixel-level segmentation with language reasoning [76, 68, 73, 2, 61, 88, 84, 16]. For instance, Florence-2 [76] unified text, detection, and segmentation through a sequence-to-sequence framework with task instructions. To address the complexity of real-world segmentation scenarios, some works focus on architectural specialization and instruction-aware adaptation. LISA [26, 80] established the paradigm by introducing a [SEG] token to connect LLMs with segmentation decoders like SAM [25], enabling language-guided mask prediction. Subsequent studies enhanced this paradigm: GSVA [75] introduced shared-weight [SEG] tokens and [REJ] tokens for multi-target and empty-target handling [35, 55, 90], while GLaMM [53] achieved pixel-grounded conversational capabilities through holistic segmentation [94]. Parallel efforts focused on architectural unification - PSALM [92] established a flexible input schema for multi-task segmentation, and OMG-LLaVA [89] combined universal segmentation backbones with LLMs for pixel-level reasoning. Video understanding extensions emerged through VISA [78] and InstructSeg [71], which integrated temporal reasoning. Notably, Text4Seg [27] redefined segmentation as a text generation problem using semantic descriptors, eliminating the need for an additional decoder. In remote sensing, benefiting from the above paradigms [26, 27], some unified models such as RSUniVLM [42], GeoGround [95] and GeoPix [50] are equipped with segmentation capabilities. Although based on LLM, these models focus only on explicit text-guided segmentation. Further, GeoPixel [57] introduced grounded conversation generation [53] to remote sensing, but it still does not provide reasoning capability. Our SegEarth-R1 also follows the LLM-based segmentation paradigm, but is different from previous methods. Specifically, SegEarth-R1 is the first work to support reasoning about the target region from implicit queries, and its components are specifically designed for the challenges in remote sensing." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.089, + 0.825, + 0.131 + ], + "angle": 0, + "content": "Table 1: Comparison between EarthReason and other related datasets. The gray rendering denotes the natural image dataset. \"Seg\", \"Det\", \"VG\", \"Cls\" denote segmentation, detection, visual grounding and classification datasets, respectively." + }, + { + "type": "table", + "bbox": [ + 0.184, + 0.139, + 0.819, + 0.251 + ], + "angle": 0, + "content": "
Dataset | Mask Label | Reasoning Query | Spatial resolution | Image Size | Image Num | Image Source | Class Num
ReasonSeg [26] | ✓ | ✓ | - | - | 1,218 | OpenImages (Seg) & ScanNetv2 (Seg) | -
LLM-Seg40K [65] | ✓ | ✓ | - | - | 14,000 | LVIS (Seg) & EgoObjects (Seg) | -
EarthVQA [66] | ✗ | ✓ | 0.3m | 1024² | 6,000 | LoveDA (Seg) | 14
RefSegRS [85] | ✓ | ✗ | 0.5m-30m | 800² | 4,420 | SkyScapes (Seg) | 14
RRSIS-D [40] | ✓ | ✗ | 0.13m | 512² | 17,402 | RSVGD (VG) & DIOR (OD) | 20
RISBench [12] | ✓ | ✗ | 0.1m-30m | 512² | 52,472 | DOTAv2 (OD) & DIOR (OD) | 26
EarthReason | ✓ | ✓ | 0.5m-153m | 1232²-7617² | 5,434 | AID (Cls) & fMoW (Cls) | 28
" + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.274, + 0.733, + 0.291 + ], + "angle": 0, + "content": "3 Benchmark Geospatial Pixel Reasoning Dataset—EarthReason" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.306, + 0.45, + 0.321 + ], + "angle": 0, + "content": "3.1 Comparison with Related Dataset" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.331, + 0.828, + 0.595 + ], + "angle": 0, + "content": "We analyze three types of tasks and datasets related to geospatial pixel reasoning, i.e., natural image reasoning segmentation, remote sensing visual question answering (VQA), and remote sensing referring segmentation, as shown in Table 1. RefSegRS [85] and RRSIS-D [40] provide early benchmarks with image-text-mask triplets. RISBench [12], the largest RRSIS dataset to date, introduced 52,472 triplets with oriented bounding boxes and pixel-level masks generated via a semi-automatic pipeline. These datasets address the limitations of earlier text-focused datasets (e.g., RSICD [46], EarthVQA [66], etc.) and enable comprehensive evaluation of multimodal models. Compared to the previous referring segmentation datasets, our EarthReason datasets has the following features: (1) The mask labels in EarthReason are not explicitly specified by the query, but require further reasoning to determine the target, which challenges the model's reasoning ability. (2) EarthReason uses a more raw data source. The previous related datasets directly transform existing segmentation datasets [1, 67] or SAM-processed detection datasets [86, 31, 11], while our EarthReason uses images from classification datasets [44, 8] and we manually annotate them. This allows EarthReason to provide more data gain when it comes to co-training of unified segmentation tasks. (3) EarthReason has more diverse spatial resolutions and image sizes, which are conducive to solving the object scale spanning problem inherent in remote sensing images [56]. Compared to the first natural image reasoning segmentation dataset, ReasonSeg, EarthReason contains \\(4.46 \\times\\) more data than it. Therefore, we believe that EarthReason, as the first geospatial pixel reasoning dataset in the remote sensing area, is capable of performing initial explorations of this task." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.611, + 0.411, + 0.627 + ], + "angle": 0, + "content": "3.2 Dataset Generation Pipeline" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.637, + 0.825, + 0.666 + ], + "angle": 0, + "content": "Our benchmark dataset EarthReason is generated according to the following three steps, i.e., image collection, question-answer pair generation, and object mask labeling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.672, + 0.827, + 0.797 + ], + "angle": 0, + "content": "Image Collection. As mentioned above, to avoid potential data leakage in the future construction of unified segmentation models for remote sensing, we collect images from existing classification data. Although this increases the annotation cost, it also motivates more diverse scenes. Specifically, we first select the 28 categories that are more suitable for reasoning in the Million-AID [44] dataset, and sample about 200 images for each category. Then, we find that the actual geographic range contained in Million-AID's images is limited. Thus, we also collect 800 images in the fMoW [8] dataset to enhance the model's reasoning ability in complex scenes. 
Further, to alleviate the hallucination issue [51], we add an extra 200 empty target images (i.e., the implied target is not in the image). Finally, some low-quality images are eliminated, and we obtain a total of 5,434 images."
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.57, + 0.707, + 0.588 + ], + "angle": 0, + "content": "4 Baseline Geospatial Pixel Reasoning Method—SegEarth-R1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.607, + 0.825, + 0.718 + ], + "angle": 0, + "content": "Compared with natural images, remote sensing images exhibit distinctive characteristics that demand specialized architectural designs for pixel-wise geospatial reasoning. In this work, we propose SegEarth-R1, a simple yet powerful baseline for geospatial pixel reasoning that effectively harnesses LLM capabilities while incorporating domain-specific adaptations. As illustrated in Figure 2, our architecture comprises three core parts: A visual encoder for image feature extraction, an LLM for instruction interpretation and semantic correlation, and a mask generator for spatial correlation and mask prediction. Each part incorporates critical design considerations to address the unique challenges of remote sensing images." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.744, + 0.414, + 0.758 + ], + "angle": 0, + "content": "4.1 Hierarchical Visual Encoder" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.773, + 0.826, + 0.913 + ], + "angle": 0, + "content": "Satellite and aerial targets present two critical challenges: (1) extreme scale variations ranging from sub-meter objects to kilometer-scale geographical formations [56], and (2) densely distributed small objects requiring high-resolution analysis [30]. Conventional ViT-based encoders adopted in MLLMs [26, 80, 25, 75] (e.g., image encoder in CLIP [52] and SAM [25, 54]) prove suboptimal due to their fixed-scale feature extraction and information compression through aggressive patch merging. To alleviate these limitations, following [92], SegEarth-R1 employs a Swin Transformer [43] backbone enhanced with progressive feature hierarchy construction. This architecture generates multi-scale feature maps \\( v_{h}, h \\in [1,4] \\) at \\( 1/4, 1/8, 1/16, 1/32 \\) of the original resolution through controlled downsampling operations, preserving high-resolution details for small objects while capturing contextual semantics at deeper layers." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.092, + 0.51, + 0.108 + ], + "angle": 0, + "content": "4.2 Large Language Model and Input Schema" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.117, + 0.825, + 0.187 + ], + "angle": 0, + "content": "SegEarth-R1 adopts the MLLM paradigm [37, 29] by jointly embedding visual tokens and textual instructions into a unified LLM input space for multimodal reasoning. Unlike natural images, remote sensing data exhibits ultra-high-resolution coverage [23, 64], posing computational challenges when processed through billion-level LLMs. Therefore, we expect to compress the visual token to alleviate the computational cost and make only simple semantic correlations in LLM." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.201, + 0.318, + 0.215 + ], + "angle": 0, + "content": "4.2.1 Visual Token" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.225, + 0.825, + 0.282 + ], + "angle": 0, + "content": "Redundancy Analysis. Image redundancy quantifies the proportion of compressible, non-informative data within an image. 
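Given the record structure described above (one image, one mask, and on average six questions with three answer variants per training image), a loading sketch might look like the following; the file layout and field names are assumptions for illustration only.

```python
import json
import random
from PIL import Image
from torch.utils.data import Dataset

class EarthReasonDataset(Dataset):
    # Assumed layout: an annotation JSON listing image/mask paths plus the
    # question and answer variants generated for each sample.
    def __init__(self, annotation_file, transform=None):
        with open(annotation_file) as f:
            self.records = json.load(f)
        self.transform = transform

    def __len__(self):
        return len(self.records)

    def __getitem__(self, idx):
        rec = self.records[idx]
        image = Image.open(rec["image_path"]).convert("RGB")
        mask = Image.open(rec["mask_path"])          # empty-target samples have all-zero masks
        question = random.choice(rec["questions"])   # ~6 paraphrased reasoning questions
        answer = random.choice(rec["answers"])       # ~3 answer variants
        if self.transform is not None:
            image, mask = self.transform(image, mask)
        return image, mask, question, answer
```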
To investigate the feasibility of aggressive visual token compression for remote sensing images, we conduct a redundancy analysis from dual perspectives: pixel-level statistical redundancy and spatial structural redundancy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.293, + 0.825, + 0.351 + ], + "angle": 0, + "content": "- According to information theory [59], entropy measures the average uncertainty or information content of an image, while the maximum entropy corresponds to the idealized scenario where pixel values are uniformly distributed (i.e., no redundancy). Thus, from the entropy perspective, the image redundancy can be defined as [14]:" + }, + { + "type": "equation", + "bbox": [ + 0.389, + 0.356, + 0.826, + 0.392 + ], + "angle": 0, + "content": "\\[\nR _ {e} = 1 - \\frac {- \\sum_ {l = 0} ^ {L - 1} p (l) \\log_ {2} p (l)}{\\log_ {2} L}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.399, + 0.826, + 0.429 + ], + "angle": 0, + "content": "where \\( L \\) denotes the number of distinct intensity levels (e.g., \\( L = 256 \\) for an 8-bit grayscale image), and \\( p(l) \\) denotes the probability mass function of the pixel intensity value \\( l \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.44, + 0.825, + 0.496 + ], + "angle": 0, + "content": "- Beyond pixel-level statistical redundancy, structural self-similarity reflects spatial redundancy caused by repetitive patterns (e.g., textures, geometric features). To quantify this, we leverage the Structural Similarity Index Matrix (SSIM) [70] to measure inter-patch similarity. For an image partitioned into \\(N\\) patches, the SSIM matrix \\(\\mathbf{M} \\in \\mathbb{R}^{N \\times N}\\) is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.295, + 0.503, + 0.826, + 0.54 + ], + "angle": 0, + "content": "\\[\n\\mathbf {M} (i, j) = \\frac {(2 \\mu_ {i} \\mu_ {j} + C _ {1}) (2 \\sigma_ {i j} + C _ {2})}{(\\mu_ {i} ^ {2} + \\mu_ {j} ^ {2} + C _ {1}) (\\sigma_ {i} ^ {2} + \\sigma_ {j} ^ {2} + C _ {2})}, \\quad \\forall i, j \\in 1, \\dots , N \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.183, + 0.545, + 0.825, + 0.587 + ], + "angle": 0, + "content": "where \\(\\mu_{i},\\sigma_{i}\\) denote the mean and variance of the \\(i\\) -th patch, \\(\\sigma_{ij}\\) is the covariance between patches \\(i\\) and \\(j\\), and \\(C_1,C_2\\) are stability constants. Then, the structural self-similarity redundancy \\(R_{s}\\) is derived by averaging off-diagonal elements of \\(\\mathbf{M}\\):" + }, + { + "type": "equation", + "bbox": [ + 0.402, + 0.593, + 0.826, + 0.632 + ], + "angle": 0, + "content": "\\[\nR _ {s} = \\frac {1}{N (N - 1)} \\sum_ {i \\neq j} \\mathbf {M} (i, j). \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.642, + 0.422, + 0.795 + ], + "angle": 0, + "content": "We evaluate six benchmark datasets spanning natural images (COCO [3], ADE20K [93], PASCAL [13]) and remote sensing images (LoveDA [67], DeepGlobe [9], xBD [15]) for redundancy analysis. 
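The two redundancy measures formalized in Eqs. (1)-(3) below are straightforward to compute. A minimal sketch, assuming 8-bit grayscale input and scikit-image's SSIM implementation, is given here; the non-overlapping patching scheme and patch size are illustrative choices rather than the paper's exact setup.

```python
import numpy as np
from skimage.metrics import structural_similarity as ssim

def entropy_redundancy(gray, levels=256):
    # R_e = 1 - H(image) / log2(L), Eq. (1); `gray` is an 8-bit grayscale array.
    hist = np.bincount(gray.ravel(), minlength=levels).astype(np.float64)
    p = hist / hist.sum()
    p = p[p > 0]
    entropy = -(p * np.log2(p)).sum()
    return 1.0 - entropy / np.log2(levels)

def structural_redundancy(gray, patch=64):
    # R_s = mean off-diagonal SSIM between non-overlapping patches, Eqs. (2)-(3).
    h, w = gray.shape
    patches = [gray[i:i + patch, j:j + patch]
               for i in range(0, h - patch + 1, patch)
               for j in range(0, w - patch + 1, patch)]
    n = len(patches)
    sims = [ssim(patches[i], patches[j], data_range=255)
            for i in range(n) for j in range(n) if i != j]
    return float(np.mean(sims))
```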
As shown in Figure 3, our analysis reveals two critical findings: 1) Remote sensing images demonstrate \\(1.9\\sim 3.3\\times\\) higher entropic redundancy than natural images, indicating greater pixel-level" + }, + { + "type": "image", + "bbox": [ + 0.443, + 0.654, + 0.625, + 0.735 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.455, + 0.742, + 0.612, + 0.756 + ], + "angle": 0, + "content": "(a) pixel-level redundancy" + }, + { + "type": "image", + "bbox": [ + 0.633, + 0.654, + 0.815, + 0.734 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.629, + 0.742, + 0.817, + 0.756 + ], + "angle": 0, + "content": "(b) spatial structure redundancy" + }, + { + "type": "image_caption", + "bbox": [ + 0.431, + 0.764, + 0.825, + 0.791 + ], + "angle": 0, + "content": "Figure 3: Redundancy analysis of remote sensing datasets and natural images, and the former exhibits higher redundancy." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.795, + 0.825, + 0.838 + ], + "angle": 0, + "content": "compressibility. 2) The average self-similarity for remote sensing data exceeds natural images by \\(42.6\\%\\), confirming the higher prevalence of repetitive textures and geometric patterns. This insight justifies aggressive token compression for semantic-level comprehension in remote sensing images." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.842, + 0.826, + 0.913 + ], + "angle": 0, + "content": "Token Compression Connector. In modern MLLM, connectors such as Q-Former [29] and MLP [37] are designed to transform visual tokens into a multi-modal space. However, some works [4, 82] point out that Q-Former may lead to loss of vision information and is difficult to train. Therefore, in SegEarth-R1, we follow the MLP connector fashion in LLaVA [37] and use a simple but effective connector, i.e., stacked convolutional blocks and Layer Normalization (LN). Here, convolutional" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.12 + ], + "angle": 0, + "content": "blocks are used for spatial down-sampling to compress the size of the feature map, and LN is used to stabilize cross-modal training. Specifically, our connector can be formulated as:" + }, + { + "type": "equation", + "bbox": [ + 0.404, + 0.123, + 0.826, + 0.141 + ], + "angle": 0, + "content": "\\[\nv _ {o u t} = \\left(\\operatorname {C o n v} \\circ L N\\right) ^ {d} \\left(v _ {4}\\right), \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.142, + 0.798, + 0.157 + ], + "angle": 0, + "content": "where \\( \\circ \\) denotes the function composition operator, and \\( d \\) denotes the number of stacked layers." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.17, + 0.338, + 0.185 + ], + "angle": 0, + "content": "4.2.2 Text Instruction" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.194, + 0.827, + 0.279 + ], + "angle": 0, + "content": "Although the instructions involved in geospatial pixel reasoning are implicit and contain more words than referring segmentation, they still maintain the same data format. Therefore, it is easy to convert them into question-answer pairs using a template like \"USER: This is an image , please doing geospatial pixel reasoning according to the following instruction: . ASSISTANT: \\) in text instruction." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.148 + ], + "angle": 0, + "content": "Network Architecture. Unless otherwise specified, SegEarth-R1 use phi-1.5 (1.3B) [33] as the LLM, and adopt the Swin-B as the visual encoder. The token compression connector is configured with a layer number \\( d = 2 \\). The mask generator follows the Mask2Former architecture, but removes mask tokens as mentioned above." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.154, + 0.827, + 0.253 + ], + "angle": 0, + "content": "Implementation details. During training, we use bf16 precision and freeze the visual encoder. The LLM is initialized from Phi-1.5, while both the Swin-B encoder and the mask generator are initialized with pretrained weights from Mask2Former. All images are resized to \\(1024 \\times 1024\\), maintaining the original aspect ratio by padding the shorter side. We adopt the AdamW optimizer with an initial learning rate of \\(1 \\times 10^{-4}\\), cosine learning rate schedule, and no weight decay. A uniform batch size of 16 is used across datasets, with training steps set to 7,610 (RRSIS-D), 5,400 (RefSegRS), and 2,220 (EarthReason). All experiments are conducted on two NVIDIA A100 80GB GPUs." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.282, + 0.459, + 0.296 + ], + "angle": 0, + "content": "5.2 Geospatial Pixel Reasoning Results" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.317, + 0.632, + 0.343 + ], + "angle": 0, + "content": "Table 2: Geospatial pixel reasoning results among SegEarth-R1 (ours) and previous related works." + }, + { + "type": "table", + "bbox": [ + 0.18, + 0.351, + 0.625, + 0.428 + ], + "angle": 0, + "content": "
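The token compression connector of Eq. (4), i.e., d stacked (Conv ∘ LN) blocks applied to the deepest feature map v4, could look roughly like the PyTorch sketch below. The channel width, kernel size, and stride-2 downsampling are assumptions made for illustration.

```python
import torch.nn as nn

class ConvLNBlock(nn.Module):
    # One (Conv -> LayerNorm) stage with stride-2 spatial downsampling.
    def __init__(self, in_dim, out_dim):
        super().__init__()
        self.conv = nn.Conv2d(in_dim, out_dim, kernel_size=3, stride=2, padding=1)
        self.norm = nn.LayerNorm(out_dim)

    def forward(self, x):
        x = self.conv(x)
        x = x.permute(0, 2, 3, 1)   # channels-last so LayerNorm runs over channels
        x = self.norm(x)
        return x.permute(0, 3, 1, 2)

class TokenCompressionConnector(nn.Module):
    # d stacked (Conv o LN) blocks applied to the deepest feature map v4, per Eq. (4).
    def __init__(self, in_dim, llm_dim, depth=2):
        super().__init__()
        dims = [in_dim] + [llm_dim] * depth
        self.blocks = nn.Sequential(*[ConvLNBlock(dims[i], dims[i + 1]) for i in range(depth)])

    def forward(self, v4):
        x = self.blocks(v4)                    # spatially compressed feature map
        return x.flatten(2).transpose(1, 2)    # (B, N_tokens, llm_dim) visual tokens for the LLM
```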
Method | Visual Encoder | LLM Type | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test)
LISA [26] | CLIP-L | Vicuna-7B [7] | 57.39 | 59.10 | 61.04 | 60.88
PixelLM [55] | CLIP-L | Vicuna-7B [7] | 57.79 | 59.22 | 57.94 | 60.01
PSALM [92] | Swin-B | phi-1.5 (1.3B) [33] | 62.03 | 64.61 | 66.61 | 68.30
SegEarth-R1 | Swin-B | phi-1.5 (1.3B) [33] | 64.13 | 68.25 | 68.60 | 70.75
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.436, + 0.825, + 0.562 + ], + "angle": 0, + "content": "LISA and PixelLM demonstrate comparable performance; however, despite leveraging larger LLM or MLLM, the quality of their predicted segmentation masks remains suboptimal. This can be primarily attributed to their reliance on CLIP as the visual encoder, which tends to diminish the representation of small-scale geospatial targets. As one of the baselines of SegEarth-R1, PSALM achieves notable improvements over LISA and PixelLM. Nevertheless, PSALM does not adequately incorporate LLM-based segmentation and the Mask2Former paradigm, and lacks considerations for overhead images. SegEarth-R1 achieves the best results on both metrics surpassing PSALM by \\(3.64\\%\\) and \\(2.45\\%\\) on the test set. Importantly, SegEarth-R1 uses fewer visual tokens in LLM and reduces the number of queries in the mask generator, thus providing a lower inference cost." + }, + { + "type": "text", + "bbox": [ + 0.641, + 0.312, + 0.827, + 0.436 + ], + "angle": 0, + "content": "We conduct a comparative evaluation of SOTA LLM-based methods and SegEarth-R1 on the Earth-Reason dataset. As shown in Table 2, all models are trained solely on the training split of EarthReason to ensure a fair comparison." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.591, + 0.436, + 0.606 + ], + "angle": 0, + "content": "5.3 Referring Segmentation Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.622, + 0.455, + 0.87 + ], + "angle": 0, + "content": "SegEarth-R1 also supports basic explicit language-guided segmentation. As shown in Table 3, we compare its performance with existing SOTA traditional methods (not based on LLM) as well as recent LLM-based methods. Notably, prior to SegEarth-R1, LLM-based methods consistently underperformed in comparison to traditional methods on the referring segmentation task. For instance, the advanced GeoGround [95] lags behind RMSIN [40] by \\(3.7\\%\\) in terms of gIoU on the RRSIS-D dataset. In contrast, SegEarth-R1, as a universal LLM-based language-guided segmentation method, surpasses traditional methods on the referring segmentation task for the first time with a \\(2.2\\%\\) improvement. This result highlights the enhanced general" + }, + { + "type": "table_caption", + "bbox": [ + 0.463, + 0.627, + 0.825, + 0.653 + ], + "angle": 0, + "content": "Table 3: Referring segmentation results among SegEarth-R1 and previous related works on RRSIS-D dataset." + }, + { + "type": "table", + "bbox": [ + 0.476, + 0.661, + 0.816, + 0.853 + ], + "angle": 0, + "content": "
Method | P@0.5 (Val) | P@0.5 (Test) | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test)
Traditional method:
RRN [32] (CVPR'18) | 51.09 | 51.07 | 66.53 | 66.43 | 46.06 | 45.64
CMSA [83] (CVPR'19) | 55.68 | 55.32 | 69.39 | 69.39 | 48.85 | 48.54
LSCM [22] (ECCV'20) | 57.12 | 56.02 | 69.05 | 69.28 | 50.36 | 49.92
CMPC [21] (CVPR'20) | 57.93 | 55.83 | 69.22 | 69.39 | 50.41 | 49.24
BRINet [20] (CVPR'20) | 58.79 | 56.90 | 70.73 | 69.88 | 51.14 | 49.65
CMPC+ [39] (TPAMI'20) | 59.19 | 57.65 | 70.14 | 68.64 | 51.41 | 50.24
LGCE [85] (TGRS'24) | 68.10 | 67.65 | 76.68 | 76.34 | 60.16 | 59.37
RIS-DMMI [19] (CVPR'23) | 70.40 | 68.74 | 77.01 | 76.20 | 60.72 | 60.12
LAVT [81] (CVPR'22) | 69.54 | 69.52 | 77.59 | 77.19 | 61.46 | 61.04
RMSIN [40] (CVPR'24) | 74.66 | 74.26 | 78.27 | 77.79 | 65.10 | 64.20
LLM-based method:
LISA [26] (CVPR'24) | 27.07 | 24.51 | - | - | 27.84 | 26.78
PixelLM [55] (CVPR'24) | 33.46 | 28.81 | - | - | 33.89 | 31.65
NEXT-Chat [87] (arXiv'23) | 28.97 | 26.37 | - | - | 26.98 | 24.98
GeoGround [95] (arXiv'25) | 68.69 | 67.50 | - | - | 61.10 | 60.50
SegEarth-R1 | 78.62 | 76.96 | 78.92 | 78.01 | 67.56 | 66.40
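The cIoU, gIoU, and Precision@X numbers reported in Tables 2-4 follow the conventional referring-segmentation definitions: gIoU averages the per-image IoU, cIoU divides the cumulative intersection by the cumulative union over the whole split, and P@X is the fraction of samples whose IoU exceeds X. The sketch below is an assumed reference implementation (including the empty-target convention), not the authors' evaluation code.

```python
import numpy as np

def evaluate(pred_masks, gt_masks, thresholds=(0.5, 0.6, 0.7, 0.8, 0.9)):
    # pred_masks, gt_masks: lists of boolean arrays of identical shape per sample.
    ious, total_inter, total_union = [], 0, 0
    for pred, gt in zip(pred_masks, gt_masks):
        inter = np.logical_and(pred, gt).sum()
        union = np.logical_or(pred, gt).sum()
        ious.append(1.0 if union == 0 else inter / union)  # both empty counts as a hit
        total_inter += inter
        total_union += union
    ious = np.asarray(ious)
    results = {
        "gIoU": ious.mean(),                        # mean of per-image IoU
        "cIoU": total_inter / max(total_union, 1),  # cumulative IoU over the split
    }
    for t in thresholds:
        results[f"P@{t}"] = (ious > t).mean()
    return results
```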
" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.825, + 0.913 + ], + "angle": 0, + "content": "ization capability and practical potential of SegEarth-R1. On the RefSegRS dataset, the improvement of SegEarth-R1 is more significant than the previous method, with an \\(8.33\\%\\) and \\(9.87\\%\\) improvement over RMSIN on the validation and testing sets, respectively, as listed in Table 4." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "table_caption", + "bbox": [ + 0.174, + 0.099, + 0.822, + 0.112 + ], + "angle": 0, + "content": "Table 4: Referring segmentation results among SegEarth-R1 and previous related works on RefSegRS dataset." + }, + { + "type": "table", + "bbox": [ + 0.175, + 0.112, + 0.825, + 0.27 + ], + "angle": 0, + "content": "
Method | P@0.5 Val | P@0.5 Test | P@0.6 Val | P@0.6 Test | P@0.7 Val | P@0.7 Test | P@0.8 Val | P@0.8 Test | P@0.9 Val | P@0.9 Test | cIoU Val | cIoU Test | gIoU Val | gIoU Test
BRINet [20] CVPR'20 | 36.86 | 20.72 | 35.53 | 14.26 | 19.93 | 9.87 | 10.66 | 2.98 | 2.84 | 1.14 | 61.59 | 58.22 | 38.73 | 31.51
LSCM [22] ECCV'20 | 56.82 | 31.54 | 41.24 | 20.41 | 21.85 | 9.51 | 12.11 | 5.29 | 2.51 | 0.84 | 62.82 | 61.27 | 40.59 | 35.54
CMPC [21] CVPR'20 | 46.09 | 32.36 | 26.45 | 14.14 | 12.76 | 6.55 | 7.42 | 1.76 | 1.39 | 0.22 | 63.55 | 55.39 | 42.08 | 40.63
CMSA [83] CVPR'19 | 39.24 | 28.07 | 38.44 | 20.25 | 20.39 | 12.71 | 11.79 | 5.61 | 1.52 | 0.83 | 65.84 | 64.53 | 43.62 | 41.47
RRN [32] CVPR'18 | 55.43 | 30.26 | 42.98 | 23.01 | 23.11 | 14.87 | 13.72 | 7.17 | 2.64 | 0.98 | 69.24 | 65.06 | 50.81 | 41.88
EVF-SAM [91] arXiv'24 | 57.77 | 35.17 | 37.59 | 22.34 | 16.24 | 9.36 | 4.87 | 2.86 | 1.86 | 0.39 | 59.61 | 55.51 | 46.98 | 36.64
CMPC+ [39] TPAMI'21 | 56.84 | 49.19 | 37.59 | 28.31 | 20.42 | 15.31 | 10.67 | 8.12 | 2.78 | 0.55 | 70.62 | 66.53 | 47.13 | 43.65
CARIS [41] ACMMM'23 | 68.45 | 45.40 | 47.10 | 27.19 | 25.52 | 15.08 | 14.62 | 8.87 | 3.71 | 1.98 | 75.79 | 69.74 | 54.30 | 42.66
CRIS [69] CVPR'22 | 53.13 | 35.77 | 36.19 | 24.11 | 24.36 | 14.36 | 11.83 | 6.38 | 2.55 | 1.21 | 72.14 | 65.87 | 53.74 | 43.26
LAVT [81] CVPR'22 | 80.97 | 51.84 | 58.70 | 30.27 | 31.09 | 17.34 | 15.55 | 9.52 | 4.64 | 2.09 | 78.50 | 71.86 | 61.53 | 47.40
RIS-DMMI [19] CVPR'23 | 86.17 | 63.89 | 74.71 | 44.30 | 38.05 | 19.81 | 18.10 | 6.49 | 3.25 | 1.00 | 74.02 | 68.58 | 65.72 | 52.15
LGCE [85] TGRS'24 | 90.72 | 73.75 | 86.31 | 61.14 | 71.93 | 39.46 | 32.95 | 16.02 | 10.21 | 5.45 | 83.56 | 76.81 | 72.51 | 59.96
RMSIN [40] CVPR'24 | 93.97 | 79.20 | 89.33 | 65.99 | 74.25 | 42.98 | 29.70 | 16.51 | 7.89 | 3.25 | 82.41 | 75.72 | 73.84 | 62.58
SegEarth-R1 | 95.82 | 86.30 | 93.27 | 79.53 | 88.86 | 69.57 | 78.19 | 48.87 | 22.04 | 10.73 | 85.01 | 79.00 | 82.17 | 72.45
" + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.296, + 0.291, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.293, + 0.298, + 0.385, + 0.378 + ], + "angle": 0, + "content": "USER: In this region, what infrastructure changes could best support an extended growing season for local agricultural crops? ASISTANT: The greenhouse creates a controlled environment that shields plants from harsh weather, harmful insects, and infectious diseases, all of which can threaten the health of the plant." + }, + { + "type": "image", + "bbox": [ + 0.389, + 0.297, + 0.501, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.503, + 0.298, + 0.619, + 0.385 + ], + "angle": 0, + "content": "USER: Which types of facilities would aid small boat docking in this coastal area, while also contributing positively to tourism and local fisheries? \nASSISTANT: A pier is a man-made construction that extends over the water's surface, acting as a convenient spot for boats to moor while also providing easy access for passengers to get on and off. It serves as a vital hub for the maritime industry, ensuring smooth operations and timely delivery of products." + }, + { + "type": "image", + "bbox": [ + 0.624, + 0.297, + 0.736, + 0.385 + ], + "angle": 0, + "content": null + }, + { + "type": "image_footnote", + "bbox": [ + 0.738, + 0.298, + 0.82, + 0.385 + ], + "angle": 0, + "content": "USER: What locations facilitate the takeoff and landing of aircraft under supervision from air traffic controllers in a regulated setting? \nASSISTANT: The airport runway is meticulously crafted to facilitate the safe ascent and descent of aircraft, ensuring that every flight begins and ends with precision." + }, + { + "type": "image_caption", + "bbox": [ + 0.184, + 0.395, + 0.812, + 0.41 + ], + "angle": 0, + "content": "Figure 5: Qualitative Results of SegEarth-R1 on EarthReason. More results can be found in Appendix C." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.434, + 0.32, + 0.451 + ], + "angle": 0, + "content": "5.4 Ablation Study" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.465, + 0.536, + 0.517 + ], + "angle": 0, + "content": "Table 5: Ablation of SegEarth-R1 components on EarthReason: query description embedding (Query D.E.), description projector \\((D\\) -Projectile), token compression connector (T.C. Connector)." + }, + { + "type": "table", + "bbox": [ + 0.184, + 0.525, + 0.526, + 0.616 + ], + "angle": 0, + "content": "
Query D.E. / D-Projector / T.C. Connector | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test)
XXX | 62.03 | 64.61 | 66.61 | 68.30
XX | 63.34 | 66.19 | 67.42 | 69.15
XX | 63.32 | 66.31 | 67.22 | 69.21
XX | 63.47 | 65.41 | 68.31 | 69.20
X | 64.12 | 66.71 | 68.61 | 69.61
 | 64.13 | 68.25 | 68.60 | 70.75
" + }, + { + "type": "table_caption", + "bbox": [ + 0.542, + 0.465, + 0.816, + 0.48 + ], + "angle": 0, + "content": "Table 6: Ablation of LLM type on RRSIS-D." + }, + { + "type": "table", + "bbox": [ + 0.564, + 0.488, + 0.8, + 0.547 + ], + "angle": 0, + "content": "
LLM Type | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test)
phi-1.5 (1.3B) | 78.92 | 78.01 | 67.56 | 66.40
phi-2 (2B) | 78.98 | 78.35 | 67.91 | 66.67
Qwen2.5 (0.5B) | 78.53 | 77.87 | 67.70 | 66.49
" + }, + { + "type": "table_caption", + "bbox": [ + 0.537, + 0.552, + 0.82, + 0.566 + ], + "angle": 0, + "content": "Table 7: Ablation of \\(d\\) on EarthReason Val set." + }, + { + "type": "table", + "bbox": [ + 0.554, + 0.575, + 0.808, + 0.613 + ], + "angle": 0, + "content": "
d | #Visual Token | gIoU | d | #Visual Token | gIoU
0 | 1024 | 68.28 | 2 | 64 | 68.60
1 | 256 | 68.47 | 3 | 16 | 68.22
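The token counts in Table 7 (1024, 256, 64, 16) shrink by a factor of 4 with each additional connector layer, which is what a stride-2 spatial downsampling of a 32 × 32 visual feature grid would produce. The PyTorch sketch below illustrates one way such a token-compression connector could be built; the convolution/activation composition and the projection into the LLM embedding space are illustrative assumptions rather than the exact SegEarth-R1 design.

```python
import torch
import torch.nn as nn

class TokenCompressionConnector(nn.Module):
    """Illustrative connector: d stride-2 conv layers, each shrinking the
    visual token grid by 2x2 (i.e. 4x fewer tokens per layer)."""

    def __init__(self, in_dim: int, llm_dim: int, d: int = 1):
        super().__init__()
        layers = []
        for _ in range(d):
            layers += [nn.Conv2d(in_dim, in_dim, kernel_size=2, stride=2), nn.GELU()]
        self.compress = nn.Sequential(*layers)   # identity when d == 0
        self.proj = nn.Linear(in_dim, llm_dim)   # map tokens into the LLM embedding space

    def forward(self, feat: torch.Tensor) -> torch.Tensor:
        # feat: (B, C, H, W) visual features, e.g. H = W = 32 -> 1024 tokens at d = 0
        feat = self.compress(feat)
        tokens = feat.flatten(2).transpose(1, 2)  # (B, H*W, C)
        return self.proj(tokens)                  # (B, H*W, llm_dim)

# 32x32 grid: d=0 -> 1024 tokens, d=1 -> 256, d=2 -> 64, d=3 -> 16
x = torch.randn(1, 256, 32, 32)
for d in range(4):
    print(d, TokenCompressionConnector(256, 2048, d)(x).shape)
```

The stride-2 composition is only one way to realize the 4x-per-layer reduction observed in the table; pooling or token merging would give the same token counts.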
" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.622, + 0.827, + 0.748 + ], + "angle": 0, + "content": "Components. We conduct ablation studies on the EarthReason dataset to evaluate the effectiveness of the novel components involved in SegEarth-R1. As listed in Table 5, the first row shows the results of the PSALM baseline. Each proposed component contributes to performance enhancement, yielding improvements ranging from \\(0.85\\%\\) to \\(0.9\\%\\). The T.C. Connector and Query D.E. not only enhances performance but also reduces computational overhead. Further, the proposed components can be well coupled, and when they are all activated, i.e., complete SegEarth-R1, all metrics exhibit substantial gains over the baseline, confirming the effectiveness and compatibility of the proposed design. In fact, although these components are initially designed with remote sensing scenarios in mind, their underlying principles offer transferable insights applicable to general image understanding." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.753, + 0.828, + 0.81 + ], + "angle": 0, + "content": "LLM Type. Given the limited scale of the dataset, we select some small LLM for comparison, as presented in Table 6. SegEarth-R1 demonstrates consistently high performance across different LLM, indicating the robustness and architectural stability of the overall framework. Notably, with Qwen2.5 (0.5B) [79], it still achieves competitive results, indicating its potential for edge deployment." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.815, + 0.828, + 0.913 + ], + "angle": 0, + "content": "Layer Number of T.C. Connector. The layer number \\(d\\) controls the number of visual tokens fed into the LLM. As shown in Table 7, increasing token quantity does not improve performance. This observation aligns with our earlier analysis, suggesting that appropriate compression of visual tokens is beneficial for the global understanding of a remote sensing image. In SegEarth-R1, spatial correlations between the image and the instruction are primarily handled by the mask generator, while the LLM is only responsible for relatively semantic correlations. This division of labor allows for more efficient use of computational resources without compromising performance." + }, + { + "type": "page_number", + "bbox": [ + 0.493, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.172, + 0.09, + 0.303, + 0.107 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.121, + 0.828, + 0.274 + ], + "angle": 0, + "content": "In this paper, we introduce geospatial pixel reasoning, a new task in remote sensing that requires models to infer segmentation masks from implicit natural language queries by reasoning over spatial context and domain knowledge. To enable research in this direction, we present EarthReason, the first large-scale benchmark dataset that emphasises complex reasoning scenarios. To address the distinct challenges inherent in remote sensing, we propose SegEarth-R1, a language-guided segmentation model that integrates a hierarchical visual encoder, an LLM for instruction parsing and semantic correlation, and a tailored mask generator designed for spatial correlation. Extensive experiments validate SegEarth-R1's superiority, achieving SOTA performance on both geospatial pixel reasoning and referring segmentation tasks. 
This work pioneers the fusion of natural language reasoning with pixel-level geospatial analysis, offering transformative potential for applications like environmental monitoring and disaster response." + }, + { + "type": "title", + "bbox": [ + 0.174, + 0.292, + 0.27, + 0.308 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.316, + 0.826, + 0.36 + ], + "angle": 0, + "content": "[1] Seyed Majid Azimi, Corentin Henry, Lars Sommer, Arne Schumann, and Eleonora Vig. Skyscapes fine-grained semantic understanding of aerial scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7393-7403, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.367, + 0.826, + 0.411 + ], + "angle": 0, + "content": "[2] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.418, + 0.826, + 0.462 + ], + "angle": 0, + "content": "[3] Holger Caesar, Jasper Uijlings, and Vittorio Ferrari. Coco-stuff: Thing and stuff classes in context. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1209-1218, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.47, + 0.826, + 0.513 + ], + "angle": 0, + "content": "[4] Junbum Cha, Wooyoung Kang, Jonghwan Mun, and Byungseok Roh. Honeybee: Locality-enhanced projector for multimodal llm. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13817-13827, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.521, + 0.826, + 0.564 + ], + "angle": 0, + "content": "[5] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1290–1299, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.572, + 0.826, + 0.614 + ], + "angle": 0, + "content": "[6] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-pixel classification is not all you need for semantic segmentation. Advances in neural information processing systems, 34:17864-17875, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.623, + 0.826, + 0.68 + ], + "angle": 0, + "content": "[7] Wei-Lin Chiang, Zhuohan Li, Ziqing Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with \\(90\\%\\) * chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.688, + 0.826, + 0.731 + ], + "angle": 0, + "content": "[8] Gordon Christie, Neil Fendley, James Wilson, and Ryan Mukherjee. Functional map of the world. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6172-6180, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.74, + 0.826, + 0.796 + ], + "angle": 0, + "content": "[9] Ilke Demir, Krzysztof Koperski, David Lindenbaum, Guan Pang, Jing Huang, Saikat Basu, Forest Hughes, Devis Tuia, and Ramesh Raskar. Deep globe 2018: A challenge to parse the earth through satellite images. 
In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 172-181, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.804, + 0.826, + 0.847 + ], + "angle": 0, + "content": "[10] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16321-16330, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.856, + 0.826, + 0.911 + ], + "angle": 0, + "content": "[11] Jian Ding, Nan Xue, Gui-Song Xia, Xiang Bai, Wen Yang, Michael Ying Yang, Serge Belongie, Jiebo Luo, Mihai Datcu, Marcello Pelillo, et al. Object detection in aerial images: A large-scale benchmark and challenges. IEEE transactions on pattern analysis and machine intelligence, 44(11):7778-7796, 2021." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.316, + 0.826, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.511, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.133 + ], + "angle": 0, + "content": "[12] Zhe Dong, Yuzhe Sun, Yanfeng Gu, and Tianzhu Liu. Cross-modal bidirectional interaction model for referring remote sensing image segmentation. arXiv preprint arXiv:2410.08613, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.144, + 0.829, + 0.187 + ], + "angle": 0, + "content": "[13] Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes (voc) challenge. International journal of computer vision, 88:303-338, 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.197, + 0.825, + 0.226 + ], + "angle": 0, + "content": "[14] Rafael C Gonzales and Paul Wintz. Digital image processing. Addison-Wesley Longman Publishing Co., Inc., 1987." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.235, + 0.825, + 0.279 + ], + "angle": 0, + "content": "[15] Ritwik Gupta, Richard Hosfelt, Sandra Sajeev, Nirav Patel, Bryce Goodman, Jigar Doshi, Eric Heim, Howie Choset, and Matthew Gaston. xbd: A dataset for assessing building damage from satellite imagery. arXiv preprint arXiv:1911.09296, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.288, + 0.829, + 0.343 + ], + "angle": 0, + "content": "[16] Junwen He, Yifan Wang, Lijun Wang, Huchuan Lu, Jun-Yan He, Jin-Peng Lan, Bin Luo, and Xuansong Xie. Multi-modal instruction tuned llms with fine-grained visual perception. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 13980-13990, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.354, + 0.825, + 0.41 + ], + "angle": 0, + "content": "[17] Ngoc-Vuong Ho, Thinh Phan, Meredith Adkins, Chase Rainwater, Jackson Cothren, and Ngan Le. Rssep: Sequence-to-sequence model for simultaneous referring remote sensing segmentation and detection. In Proceedings of the Asian Conference on Computer Vision, pages 218-231, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.421, + 0.825, + 0.464 + ], + "angle": 0, + "content": "[18] Ronghang Hu, Marcus Rohrbach, and Trevor Darrell. Segmentation from natural language expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part I 14, pages 108-124. Springer, 2016." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.473, + 0.827, + 0.517 + ], + "angle": 0, + "content": "[19] Yutao Hu, Qixiong Wang, Wenqi Shao, Enze Xie, Zhenguo Li, Jungong Han, and Ping Luo. Beyond one-to-one: Rethinking the referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4067-4077, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.526, + 0.825, + 0.57 + ], + "angle": 0, + "content": "[20] Zhiwei Hu, Guang Feng, Jiayu Sun, Lihe Zhang, and Huchuan Lu. Bi-directional relationship inferring network for referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4424-4433, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.578, + 0.827, + 0.635 + ], + "angle": 0, + "content": "[21] Shaofei Huang, Tianrui Hui, Si Liu, Guanbin Li, Yunchao Wei, Jizhong Han, Luoqi Liu, and Bo Li. Referring image segmentation via cross-modal progressive comprehension. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10488-10497, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.645, + 0.827, + 0.689 + ], + "angle": 0, + "content": "[22] Tianrui Hui, Si Liu, Shaofei Huang, Guanbin Li, Sansi Yu, Faxi Zhang, and Jizhong Han. Linguistic structure guided context modeling for referring image segmentation. In European Conference on Computer Vision, pages 59-75. Springer, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.697, + 0.825, + 0.741 + ], + "angle": 0, + "content": "[23] Deyi Ji, Feng Zhao, Hongtao Lu, Mingyuan Tao, and Jieping Ye. Ultra-high resolution segmentation with ultra-rich context: A novel benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23621-23630, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.75, + 0.827, + 0.793 + ], + "angle": 0, + "content": "[24] Lixia Ji, Yunlong Du, Yiping Dang, Wenzhao Gao, and Han Zhang. A survey of methods for addressing the challenges of referring image segmentation. Neurocomputing, 583:127599, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.803, + 0.827, + 0.858 + ], + "angle": 0, + "content": "[25] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.869, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[26] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9579-9589, 2024." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.829, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.134 + ], + "angle": 0, + "content": "[27] Mengcheng Lan, Chaofeng Chen, Yue Zhou, Jiaxing Xu, Yiping Ke, Xinjiang Wang, Litong Feng, and Wayne Zhang. Text4seg: Reimagining image segmentation as text generation. arXiv preprint arXiv:2410.09855, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.826, + 0.187 + ], + "angle": 0, + "content": "[28] Sen Lei, Xinyu Xiao, Tianlin Zhang, Heng-Chao Li, Zhenwei Shi, and Qing Zhu. Exploring fine-grained image-text alignment for referring remote sensing image segmentation. IEEE Transactions on Geoscience and Remote Sensing, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.195, + 0.825, + 0.239 + ], + "angle": 0, + "content": "[29] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pages 19730–19742. PMLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.827, + 0.291 + ], + "angle": 0, + "content": "[30] Kaiyu Li, Ruixun Liu, Xiangyong Cao, Xueru Bai, Feng Zhou, Deyu Meng, and Zhi Wang. Seearth-ov: Towards training-free open-vocabulary segmentation for remote sensing images. arXiv preprint arXiv:2410.01768, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.176, + 0.298, + 0.827, + 0.342 + ], + "angle": 0, + "content": "[31] Ke Li, Gang Wan, Gong Cheng, Liqui Meng, and Junwei Han. Object detection in optical remote sensing images: A survey and a new benchmark. ISPRS journal of photogrammetry and remote sensing, 159:296-307, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.35, + 0.827, + 0.394 + ], + "angle": 0, + "content": "[32] Ruiyu Li, Kaican Li, Yi-Chun Kuo, Michelle Shu, Xiaojuan Qi, Xiaoyong Shen, and Jiaya Jia. Referring image segmentation via recurrent refinement networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5745-5753, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.402, + 0.827, + 0.445 + ], + "angle": 0, + "content": "[33] Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar, and Yin Tat Lee. Textbooks are all you need ii: phi-1.5 technical report. arXiv preprint arXiv:2309.05463, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.453, + 0.827, + 0.497 + ], + "angle": 0, + "content": "[34] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dólar. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.506, + 0.827, + 0.55 + ], + "angle": 0, + "content": "[35] Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23592-23601, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.558, + 0.827, + 0.601 + ], + "angle": 0, + "content": "[36] Chenxi Liu, Zhe Lin, Xiaohui Shen, Jimei Yang, Xin Lu, and Alan Yuille. Recurrent multimodal interaction for referring image segmentation. In Proceedings of the IEEE international conference on computer vision, pages 1271-1280, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.609, + 0.825, + 0.641 + ], + "angle": 0, + "content": "[37] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.648, + 0.825, + 0.704 + ], + "angle": 0, + "content": "[38] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. 
Polyformer: Referring image segmentation as sequential polygon generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18653-18663, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.713, + 0.825, + 0.757 + ], + "angle": 0, + "content": "[39] Si Liu, Tianrui Hui, Shaofei Huang, Yunchao Wei, Bo Li, and Guanbin Li. Cross-modal progressive comprehension for referring segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(9):4761-4775, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.765, + 0.827, + 0.821 + ], + "angle": 0, + "content": "[40] Sihan Liu, Yiwei Ma, Xiaqing Zhang, Haowei Wang, Jiayi Ji, Xiaoshuai Sun, and Rongrong Ji. Rotated multi-scale interaction network for referring remote sensing image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26658-26668, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.831, + 0.827, + 0.875 + ], + "angle": 0, + "content": "[41] Sun-Ao Liu, Yiheng Zhang, Zhaofan Qiu, Hongtao Xie, Yongdong Zhang, and Ting Yao. Caris: Context-aware referring image segmentation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 779-788, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.883, + 0.825, + 0.913 + ], + "angle": 0, + "content": "[42] Xu Liu and Zhouhui Lian. Rsunivlm: A unified vision language model for remote sensing via granularity-oriented mixture of experts. arXiv preprint arXiv:2412.05679, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.135 + ], + "angle": 0, + "content": "[43] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF international conference on computer vision, pages 10012-10022, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.144, + 0.827, + 0.201 + ], + "angle": 0, + "content": "[44] Yang Long, Gui-Song Xia, Shengyang Li, Wen Yang, Michael Ying Yang, Xiao Xiang Zhu, Liangpei Zhang, and Deren Li. On creating benchmark dataset for aerial image interpretation: Reviews, guidances, and million-aid. IEEE Journal of selected topics in applied earth observations and remote sensing, 14:4205–4230, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.21, + 0.825, + 0.254 + ], + "angle": 0, + "content": "[45] Siqi Lu, Junlin Guo, James R Zimmer-Dauphinee, Jordan M Nieusma, Xiao Wang, Steven A Wernke, Yuankai Huo, et al. Vision foundation models in remote sensing: A survey. IEEE Geoscience and Remote Sensing Magazine, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.263, + 0.825, + 0.306 + ], + "angle": 0, + "content": "[46] Xiaoqiang Lu, Binqiang Wang, Xiangtao Zheng, and Xuelong Li. Exploring models and data for remote sensing image caption generation. IEEE Transactions on Geoscience and Remote Sensing, 56(4):2183-2195, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.315, + 0.825, + 0.358 + ], + "angle": 0, + "content": "[47] Edgar Margffoy-Tuay, Juan C Pérez, Emilio Botero, and Pablo Arbeláez. Dynamic multimodal instance segmentation guided by natural language queries. 
In Proceedings of the European Conference on Computer Vision (ECCV), pages 630–645, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.368, + 0.825, + 0.411 + ], + "angle": 0, + "content": "[48] Fausto Miletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 fourth international conference on 3D vision (3DV), pages 565-571. IEEE, 2016." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.421, + 0.827, + 0.464 + ], + "angle": 0, + "content": "[49] Sayan Nag, Koustava Goswami, and Srikrishna Karanam. Safari: Adaptive sequence tr a ns f ormer for we a kly supervised r eferring expression segmentat i on. In European Conference on Computer Vision, pages 485-503. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.473, + 0.827, + 0.515 + ], + "angle": 0, + "content": "[50] Ruizhe Ou, Yuan Hu, Fan Zhang, Jiaxin Chen, and Yu Liu. Geopix: Multi-modal large language model for pixel-level image understanding in remote sensing. arXiv preprint arXiv:2501.06828, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.526, + 0.827, + 0.57 + ], + "angle": 0, + "content": "[51] Chao Pang, Xingxing Weng, Jiang Wu, Jiayu Li, Yi Liu, Jiaxing Sun, Weijia Li, Shuai Wang, Litong Feng, Gui-Song Xia, et al. Vhm: Versatile and honest vision language model for remote sensing image analysis. arXiv preprint arXiv:2403.20213, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.578, + 0.827, + 0.636 + ], + "angle": 0, + "content": "[52] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.645, + 0.825, + 0.702 + ], + "angle": 0, + "content": "[53] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13009-13018, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.712, + 0.827, + 0.755 + ], + "angle": 0, + "content": "[54] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.764, + 0.827, + 0.807 + ], + "angle": 0, + "content": "[55] Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26374-26383, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.817, + 0.827, + 0.859 + ], + "angle": 0, + "content": "[56] Esther Rolf, Konstantin Klemmer, Caleb Robinson, and Hannah Kerner. Mission critical-satellite data is a distinct modality in machine learning. arXiv preprint arXiv:2402.01444, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.869, + 0.825, + 0.911 + ], + "angle": 0, + "content": "[57] Akashah Shabbir, Mohammed Zumri, Mohammed Bennamoun, Fahad S Khan, and Salman Khan. 
Geopixel: Pixel grounding large multimodal model in remote sensing. arXiv preprint arXiv:2501.13925, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.135 + ], + "angle": 0, + "content": "[58] Chao Shang, Zichen Song, Heqian Qiu, Lanxiao Wang, Fanman Meng, and Hongliang Li. Prompt-driven referring image segmentation with instance contrasting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4124-4134, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.141, + 0.826, + 0.172 + ], + "angle": 0, + "content": "[59] Claude E Shannon. A mathematical theory of communication. The Bell system technical journal, 27(3):379-423, 1948." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.178, + 0.827, + 0.222 + ], + "angle": 0, + "content": "[60] Hengcan Shi, Hongliang Li, Fanman Meng, and Qingbo Wu. Key-word-aware network for referring expression image segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 38-54, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.227, + 0.827, + 0.272 + ], + "angle": 0, + "content": "[61] Andreas Steiner, André Susano Pinto, Michael Tschannen, Daniel Keysers, Xiao Wang, Yonatan Bitton, Alexey Gritsenko, Matthias Minderer, Anthony Sherbondy, Shangbang Long, et al. Paligemma 2: A family of versatile vlms for transfer. arXiv preprint arXiv:2412.03555, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.277, + 0.827, + 0.322 + ], + "angle": 0, + "content": "[62] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.328, + 0.827, + 0.356 + ], + "angle": 0, + "content": "[63] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.364, + 0.827, + 0.42 + ], + "angle": 0, + "content": "[64] Fengxiang Wang, Hongzhen Wang, Mingshuo Chen, Di Wang, Yulin Wang, Zonghao Guo, Qiang Ma, Long Lan, Wenjing Yang, Jing Zhang, et al. Xlrs-bench: Could your multimodal llms understand extremely large ultra-high-resolution remote sensing imagery? arXiv preprint arXiv:2503.23771, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.428, + 0.825, + 0.47 + ], + "angle": 0, + "content": "[65] Junchi Wang and Lei Ke. Llm-seg: Bridging image segmentation and large language model reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1765-1774, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.478, + 0.827, + 0.533 + ], + "angle": 0, + "content": "[66] Junjue Wang, Zhuo Zheng, Zihang Chen, Ailong Ma, and Yanfei Zhong. Earthvqa: Towards queryable earth via relational reasoning-based remote sensing visual question answering. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 5481-5489, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.542, + 0.827, + 0.584 + ], + "angle": 0, + "content": "[67] Junjue Wang, Zhuo Zheng, Ailong Ma, Xiaoyan Lu, and Yanfei Zhong. 
Loveda: A remote sensing land-cover dataset for domain adaptive semantic segmentation. arXiv preprint arXiv:2110.08733, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.592, + 0.827, + 0.647 + ], + "angle": 0, + "content": "[68] Wenhai Wang, Zhe Chen, Xiaokang Chen, Jiannan Wu, Xizhou Zhu, Gang Zeng, Ping Luo, Tong Lu, Jie Zhou, Yu Qiao, et al. Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. Advances in Neural Information Processing Systems, 36:61501-61513, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.656, + 0.825, + 0.699 + ], + "angle": 0, + "content": "[69] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11686-11695, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.706, + 0.827, + 0.748 + ], + "angle": 0, + "content": "[70] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.756, + 0.827, + 0.799 + ], + "angle": 0, + "content": "[71] Cong Wei, Yujie Zhong, Haoxian Tan, Yingsen Zeng, Yong Liu, Zheng Zhao, and Yujiu Yang. Instructseg: Unifying instructed visual segmentation with multi-modal large language models. arXiv preprint arXiv:2412.14006, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.806, + 0.825, + 0.849 + ], + "angle": 0, + "content": "[72] Jiannan Wu, Yi Jiang, Peize Sun, Zehuan Yuan, and Ping Luo. Language as queries for referring video object segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4974-4984, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.856, + 0.825, + 0.912 + ], + "angle": 0, + "content": "[73] Jiannan Wu, Muyan Zhong, Sen Xing, Zeqiang Lai, Zhaoyang Liu, Zhe Chen, Wenhai Wang, Xizhou Zhu, Lewei Lu, Tong Lu, et al. Visionlm v2: An end-to-end generalist multimodal large language model for hundreds of vision-language tasks. Advances in Neural Information Processing Systems, 37:69925-69975, 2025." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.912 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.826, + 0.121 + ], + "angle": 0, + "content": "[74] Jianzong Wu, Xiangtai Li, Xia Li, Henghui Ding, Yunhai Tong, and Dacheng Tao. Towards robust referring image segmentation. IEEE Transactions on Image Processing, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.127, + 0.827, + 0.172 + ], + "angle": 0, + "content": "[75] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3858-3869, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.178, + 0.827, + 0.223 + ], + "angle": 0, + "content": "[76] Bin Xiao, Haiping Wu, Weijian Xu, Xiyang Dai, Houdong Hu, Yumao Lu, Michael Zeng, Ce Liu, and Lu Yuan. Florence-2: Advancing a unified representation for a variety of vision tasks (2023). 
URL https://arxiv.org/abs/2311.06242, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.228, + 0.827, + 0.285 + ], + "angle": 0, + "content": "[77] Zunnan Xu, Zhihong Chen, Yong Zhang, Yibing Song, Xiang Wan, and Guanbin Li. Bridging vision and language encoders: Parameter-efficient tuning for referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17503-17512, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.293, + 0.825, + 0.336 + ], + "angle": 0, + "content": "[78] Cilin Yan, Haochen Wang, Shilin Yan, Xiaolong Jiang, Yao Hu, Guoliang Kang, Weidi Xie, and Efstratios Gavves. Visa: Reasoning video object segmentation via large language models. In European Conference on Computer Vision, pages 98-115. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.343, + 0.825, + 0.386 + ], + "angle": 0, + "content": "[79] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.393, + 0.827, + 0.438 + ], + "angle": 0, + "content": "[80] Senqiao Yang, Tianyuan Qu, Xin Lai, Zhuotao Tian, Bohao Peng, Shu Liu, and Jiaya Jia. Lisa++: An improved baseline for reasoning segmentation with large language model. arXiv preprint arXiv:2312.17240, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.444, + 0.827, + 0.487 + ], + "angle": 0, + "content": "[81] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18155–18165, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.494, + 0.827, + 0.537 + ], + "angle": 0, + "content": "[82] Linli Yao, Lei Li, Shuhuai Ren, Lean Wang, Yuanxin Liu, Xu Sun, and Lu Hou. Deco: Decoupling token compression from semantic abstraction in multimodal large language models. arXiv preprint arXiv:2405.20985, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.544, + 0.825, + 0.588 + ], + "angle": 0, + "content": "[83] Linwei Ye, Mrigank Rochan, Zhi Liu, and Yang Wang. Cross-modal self-attention network for referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10502–10511, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.595, + 0.825, + 0.638 + ], + "angle": 0, + "content": "[84] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv preprint arXiv:2501.04001, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.645, + 0.825, + 0.676 + ], + "angle": 0, + "content": "[85] Zhenghang Yuan, Lichao Mou, Yuansheng Hua, and Xiao Xiang Zhu. Rrsis: Referring remote sensing image segmentation. IEEE Transactions on Geoscience and Remote Sensing, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.682, + 0.827, + 0.725 + ], + "angle": 0, + "content": "[86] Yang Zhan, Zhitong Xiong, and Yuan Yuan. Rsvg: Exploring data and models for visual grounding on remote sensing data. IEEE Transactions on Geoscience and Remote Sensing, 61:1-13, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.731, + 0.827, + 0.762 + ], + "angle": 0, + "content": "[87] Ao Zhang, Yuan Yao, Wei Ji, Zhiyuan Liu, and Tat-Seng Chua. Next-chat: An Imm for chat, detection and segmentation. arXiv preprint arXiv:2311.04498, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.769, + 0.825, + 0.811 + ], + "angle": 0, + "content": "[88] Ao Zhang, Yuan Yao, Wei Ji, Zhiyuan Liu, and Tat-Seng Chua. Next-chat: An Imm for chat, detection and segmentation. In International Conference on Machine Learning, pages 60116-60133. PMLR, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.818, + 0.825, + 0.862 + ], + "angle": 0, + "content": "[89] Tao Zhang, Xiangtai Li, Hao Fei, Haobo Yuan, Shengqiong Wu, Shunping Ji, Chen Change Loy, and Shuicheng Yan. Omg-llava: Bridging image-level, object-level, pixel-level reasoning and understanding. Advances in Neural Information Processing Systems, 37:71737-71767, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.869, + 0.827, + 0.913 + ], + "angle": 0, + "content": "[90] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 14227-14238, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.135 + ], + "angle": 0, + "content": "[91] Yuxuan Zhang, Tianheng Cheng, Rui Hu, Lei Liu, Heng Liu, Longjin Ran, Xiaoxin Chen, Wenyu Liu, and Xinggang Wang. Evf-sam: Early vision-language fusion for text-prompted segment anything model. arXiv preprint arXiv:2406.20076, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.827, + 0.185 + ], + "angle": 0, + "content": "[92] Zheng Zhang, Yeyao Ma, Enming Zhang, and Xiang Bai. Psalm: Pixelwise segmentation with large multi-modal model. In European Conference on Computer Vision, pages 74-91. Springer, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.194, + 0.827, + 0.238 + ], + "angle": 0, + "content": "[93] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.246, + 0.827, + 0.288 + ], + "angle": 0, + "content": "[94] Li Zhou, Xu Yuan, Zenghui Sun, Zikun Zhou, and Jingsong Lan. Instruction-guided multi-granularity segmentation and captioning with large multimodal model. arXiv preprint arXiv:2409.13407, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.297, + 0.827, + 0.34 + ], + "angle": 0, + "content": "[95] Yue Zhou, Mengcheng Lan, Xiang Li, Yiping Ke, Xue Jiang, Litong Feng, and Wayne Zhang. Geoground: A unified large vision-language model. for remote sensing visual grounding. arXiv preprint arXiv:2411.11904, 2024." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.34 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.091, + 0.251, + 0.105 + ], + "angle": 0, + "content": "A Data" + }, + { + "type": "title", + "bbox": [ + 0.173, + 0.123, + 0.409, + 0.137 + ], + "angle": 0, + "content": "A.1 Annotation of EarthReason" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.149, + 0.828, + 0.22 + ], + "angle": 0, + "content": "Each sample of the EarthReason benchmark consists of an image, a corresponding mask, and six reasoning queries along with their respective answers. Given that our metadata is derived from classification datasets, we employed GPT-4o and GPT-3.5 to generate textual annotations, and invited multiple remote sensing and vision experts to provide accurate and reliable mask annotations. Overall, our annotation process consists of the following three steps:" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.233, + 0.826, + 0.289 + ], + "angle": 0, + "content": "- Step-1: To fully leverage the powerful multimodal capabilities and extensive geographic knowledge of GPT-4o, we carefully design the prompt, which is then provided alongside images and their corresponding category labels to generate a reasoning question-answer pair. The prompt is illustrated in Figure 6." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.296, + 0.826, + 0.339 + ], + "angle": 0, + "content": "- Step-2: To avoid homogeneous question-answer formats under a single prompt, we further employ the textual capabilities of GPT-3.5 to expand each generated question into six variations and each answer into three alternatives. The prompt used for this expansion is shown in Figure 7." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.345, + 0.827, + 0.47 + ], + "angle": 0, + "content": "- Step-3: Unlike previous methods that rely on semi-automatic mask annotation based on off-the-shelf bounding boxes or masks, we invite multiple remote sensing vision experts to perform accurate and efficient mask annotation guided by the generated questions. To further improve annotation efficiency, we incorporate SAM-H as an auxiliary tool for some simple targets. Subsequently, we perform cross-validation of the annotation results and re-associate the samples that do not meet the quality standards. As shown in Figure 8, (a), (b), and (c), derived from the RRSIS-D dataset, illustrate the masks of semi-automatic annotation based on bounding boxes. (a) and (c) exhibit noticeable annotation errors, while in (b), the query does not align with the annotation. (d), (e), and (f) illustrate our high-quality manual annotations." + }, + { + "type": "list", + "bbox": [ + 0.182, + 0.233, + 0.827, + 0.47 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.503, + 0.625, + 0.559 + ], + "angle": 0, + "content": "Prompt: You are an expert in geographic remote sensing imagery. Please fully analyze the geographical landscape and cultural features in remote sensing images. Generate an implicit reasoning questions based on given object categories. Please use your imagination and feel free to change the sentence structure or add a situation description. Just give the implicit reasoning questions that meet the requirements. The descriptions must refer to the natural landscapes and cultural landscapes shown in remote sensing images. 
The output implicit reasoning questions need to meet the following requirements:" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.559, + 0.625, + 0.585 + ], + "angle": 0, + "content": "(1) Please imagine the scene and output an implicit reasoning question to describe the attributes or functions of the given object. The output question must have a certain degree of reasoning difficulty and be helpful to humans." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.586, + 0.625, + 0.613 + ], + "angle": 0, + "content": "(2) Do not explicitly write the name or description of the target object in the original text. Questions should be asked in the form of asking where, which infrastructure, how to do a certain activity, which location, what object." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.614, + 0.625, + 0.641 + ], + "angle": 0, + "content": "(3) The output cannot be redundant. Just give one question and its answer that you think has the highest confidence. The question should be at least 18 words. The answer requires giving the name of the given object and then explaining why the answer is this, about 20 words." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.641, + 0.625, + 0.658 + ], + "angle": 0, + "content": "(4) Do not output any redundant information except the question and the answer, and separate them with line break." + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.659, + 0.434, + 0.669 + ], + "angle": 0, + "content": "The given object category is . " + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.671, + 0.229, + 0.687 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.641, + 0.512, + 0.807, + 0.634 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.69, + 0.787, + 0.701 + ], + "angle": 0, + "content": "Question: If an earthquake were to occur, what is the most secure area to assemble numerous individuals that also facilitates emergency service access?" + }, + { + "type": "text", + "bbox": [ + 0.181, + 0.704, + 0.809, + 0.724 + ], + "angle": 0, + "content": "Answer: The ground track field stands out as the most secure location, thanks to its vast open area that minimizes the chance of hazards, as well as its close accessibility to primary roads for swift emergency response." + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.745, + 0.825, + 0.772 + ], + "angle": 0, + "content": "Figure 6: The illustration of the prompt construction process for generating question-answer pairs for geospatial pixel reasoning." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.802, + 0.376, + 0.816 + ], + "angle": 0, + "content": "A.2 EarthReason Statistics" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.829, + 0.825, + 0.913 + ], + "angle": 0, + "content": "The EarthReason benchmark comprises 28 categories, and the number of samples in each category is shown in Figure 9 (a). It can be observed that the distribution of the 28 categories is relatively balanced. Figure 9 (b), (c), and (d) illustrate the category distributions in the training, validation, and test sets, respectively. To evaluate the model's generalization capability, we specifically excluded four categories—\"basketball court\", \"island\", \"lake\", and \"stadium\"—from the training set. Moreover, we introduced 119 empty target samples to mitigate potential hallucinations of the model." 
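Each EarthReason sample ultimately bundles one image and mask with six question variants and three answer variants produced by the Step-1/Step-2 expansion, so a training loader can draw a different question-answer combination at every iteration. The sketch below shows one plausible way to organize such a sample; the field names and file paths are hypothetical and do not describe the released annotation format.

```python
import random
from dataclasses import dataclass
from typing import List

@dataclass
class EarthReasonSample:
    # Hypothetical field names; the released annotation files may differ.
    image_path: str
    mask_path: str
    questions: List[str]  # six reasoning question variants (Step-2 expansion)
    answers: List[str]    # three answer variants naming the target object

    def random_pair(self) -> tuple:
        """Pick one question/answer combination for a training iteration."""
        return random.choice(self.questions), random.choice(self.answers)

sample = EarthReasonSample(
    image_path="images/airport_000123.jpg",   # hypothetical path
    mask_path="masks/airport_000123.png",     # hypothetical path
    questions=["What locations facilitate the takeoff and landing of aircraft ..."] * 6,
    answers=["The airport runway is meticulously crafted to facilitate ..."] * 3,
)
question, answer = sample.random_pair()
```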
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.18, + 0.097, + 0.815, + 0.127 + ], + "angle": 0, + "content": "Prompt: Imagine you are a remote sensing expert with extensive knowledge in geography. I will give you a question or description related to remote sensing, and you will expand it into 6 related questions or topics. The given question is \"If an earthquake were to occur, what is the most secure area to assemble numerous individuals that also facilitates emergency service access?\"" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.132, + 0.226, + 0.146 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.152, + 0.795, + 0.21 + ], + "angle": 0, + "content": "Response: 1. In case of an earthquake, where can individuals find a clear and accessible area nearby to assemble safely? 2. During an earthquake, what spacious and unobstructed area close by would provide the safest environment for all of us to meet? 3. Should an earthquake happen, where in the vicinity can people find an open area to come together safely and securely? 4. In the scenario of an earthquake, where can residents find a secure area with open space and accessible paths for evacuation? 5. In an earthquake situation, which nearby area featuring open space and clear pathways would be the safest choice for resident evacuation? 6. If an earthquake occurs, which types of infrastructure are anticipated to function as dependable gathering spots with adequate open areas?" + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.226, + 0.81, + 0.255 + ], + "angle": 0, + "content": "Prompt: Imagine you are a remote sensing expert with extensive knowledge in geography. I will give you a question or description related to remote sensing, and you will expand it into 3 related questions or topics. The given description is \"The ground track field stands out as the most secure location, thanks to its vast open area that minimizes the chance of hazards, as well as its close accessibility to primary roads for swift emergency response.\"" + }, + { + "type": "image", + "bbox": [ + 0.183, + 0.264, + 0.23, + 0.28 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.286, + 0.8, + 0.326 + ], + "angle": 0, + "content": "Response: 1. With minimal obstructions and easy access to main roads, the ground track field offers both safety and convenience, positioning it as the most reliable site in terms of hazard prevention and emergency preparedness. 2. The expansive open space of the track field provides a secure environment, greatly minimizing the chance of falling objects. This makes it an ideal refuge from any potential dangers. 3. With its generous outdoor space devoid of any encroaching structures, the ground track field emerges as the premier choice, ensuring an environment where safety is prioritized." + }, + { + "type": "image_caption", + "bbox": [ + 0.214, + 0.341, + 0.784, + 0.356 + ], + "angle": 0, + "content": "Figure 7: The illustration of the prompt construction process for expand question-answer pairs." 
+ }, + { + "type": "image", + "bbox": [ + 0.189, + 0.418, + 0.362, + 0.524 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.204, + 0.527, + 0.336, + 0.538 + ], + "angle": 0, + "content": "(a) query: The gray bridge" + }, + { + "type": "image_caption", + "bbox": [ + 0.403, + 0.405, + 0.593, + 0.417 + ], + "angle": 0, + "content": "Semi-automatic Annotation" + }, + { + "type": "image", + "bbox": [ + 0.409, + 0.419, + 0.588, + 0.525 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.418, + 0.527, + 0.577, + 0.538 + ], + "angle": 0, + "content": "(b) query: The blue storage tank" + }, + { + "type": "image", + "bbox": [ + 0.629, + 0.419, + 0.807, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.619, + 0.525, + 0.818, + 0.537 + ], + "angle": 0, + "content": "(c) query: The gray bridge in the middle" + }, + { + "type": "image", + "bbox": [ + 0.189, + 0.556, + 0.362, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.262, + 0.659, + 0.279, + 0.67 + ], + "angle": 0, + "content": "(d)" + }, + { + "type": "image_caption", + "bbox": [ + 0.431, + 0.544, + 0.566, + 0.556 + ], + "angle": 0, + "content": "Manual Annotation" + }, + { + "type": "image", + "bbox": [ + 0.408, + 0.557, + 0.588, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.49, + 0.66, + 0.506, + 0.67 + ], + "angle": 0, + "content": "(e)" + }, + { + "type": "image", + "bbox": [ + 0.628, + 0.556, + 0.808, + 0.658 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.719, + 0.659, + 0.733, + 0.669 + ], + "angle": 0, + "content": "(f)" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.681, + 0.825, + 0.708 + ], + "angle": 0, + "content": "Figure 8: Comparison of annotation quality. (a), (b) and (c) are from RRSIS-D dataset, (d), (e) and (f) are from our EarthReason dataset." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.765, + 0.5, + 0.782 + ], + "angle": 0, + "content": "B Additional Implementation Details" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.816, + 0.48, + 0.832 + ], + "angle": 0, + "content": "B.1 Details of Training Hyper-parameters" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.826, + 0.914 + ], + "angle": 0, + "content": "Table 8 presents the hyper-parameter settings used during the training of our model. For training on the referring segmentation datasets, we employ only focal loss and dice loss to supervise mask generation. In contrast, for training on geospatial pixel reasoning task, we additionally incorporate the cross-entropy loss from the large language model to supervise text answer generation." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.182, + 0.089, + 0.498, + 0.241 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.218, + 0.242, + 0.465, + 0.255 + ], + "angle": 0, + "content": "(a) category distribution of the EarthReason" + }, + { + "type": "image", + "bbox": [ + 0.502, + 0.089, + 0.818, + 0.242 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.541, + 0.242, + 0.78, + 0.256 + ], + "angle": 0, + "content": "(b) category distribution of the training set" + }, + { + "type": "image", + "bbox": [ + 0.182, + 0.259, + 0.498, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.216, + 0.412, + 0.466, + 0.424 + ], + "angle": 0, + "content": "(c) category distribution of the validation set" + }, + { + "type": "image", + "bbox": [ + 0.503, + 0.259, + 0.818, + 0.41 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.553, + 0.411, + 0.768, + 0.424 + ], + "angle": 0, + "content": "(d) category distribution of the test set" + }, + { + "type": "image_caption", + "bbox": [ + 0.34, + 0.434, + 0.655, + 0.449 + ], + "angle": 0, + "content": "Figure 9: The category distribution of EarthReason." + }, + { + "type": "table_caption", + "bbox": [ + 0.332, + 0.476, + 0.663, + 0.491 + ], + "angle": 0, + "content": "Table 8: The hyper-parameters for model training." + }, + { + "type": "table", + "bbox": [ + 0.296, + 0.491, + 0.704, + 0.687 + ], + "angle": 0, + "content": "
ParametersValue
OptimizerAdamW
Learning Rate1 × 10-4
Batch Size16
Number of Iteration7,610 / 5,400 / 2,220
Learning Rate ScheduleCosine Decay
Weight Decay0.0
Warmup Ratio0.03
β10.9
β20.999
Image Size1024 × 1024
Image ProcessingResize long edge to 1024 and padding short edge to 1024.
" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.715, + 0.292, + 0.733 + ], + "angle": 0, + "content": "C Examples" + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.75, + 0.509, + 0.765 + ], + "angle": 0, + "content": "C.1 More Qualitative Results on EarthReason" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.778, + 0.827, + 0.82 + ], + "angle": 0, + "content": "Figure 10 presents a comparison between SegEarth-R1 and other models on the EarthReason dataset. It can be observed that our model demonstrates a better understanding of long reasoning instructions and produces more accurate mask generation." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.842, + 0.482, + 0.857 + ], + "angle": 0, + "content": "C.2 More Qualitative Results on RRSIS-D" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.87, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Figure 11 presents a comparison between SegEarth-R1 and PSALM on the RRSIS-D dataset. Our model demonstrates a better understanding of direct geographical attributes such as location, color, and size compared to PSALM. This improvement is attributed to the removal of indirect mask" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.172, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "prediction using mask tokens, allowing semantic information (description embeddings) to directly interact with image features to generate masks." + }, + { + "type": "image", + "bbox": [ + 0.177, + 0.142, + 0.821, + 0.721 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.319, + 0.731, + 0.679, + 0.746 + ], + "angle": 0, + "content": "Figure 10: Comparison with other models on EarthReason." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.187, + 0.126, + 0.819, + 0.855 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.342, + 0.867, + 0.655, + 0.882 + ], + "angle": 0, + "content": "Figure 11: Comparison with PSALM on RRSIS-D." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "21" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_origin.pdf b/data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..762ca6c07056dbfa1913abd28da63035550ca79d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/2fd8d811-7222-4bd3-b7d0-da4f022a4df8_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f77ed9e351ff7ad8423a280b586142eea42e3cbd4cf8f8b82c4aaa5187c3fe70 +size 4284339 diff --git a/data/2025/2504_09xxx/2504.09644/full.md b/data/2025/2504_09xxx/2504.09644/full.md new file mode 100644 index 0000000000000000000000000000000000000000..8d27bd30655411d642f05875e9693730269b871f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/full.md @@ -0,0 +1,431 @@ +# SegEarth-R1: Geospatial Pixel Reasoning via Large Language Model + +Kaiyu Li $^{1,\ast}$ , Zepeng Xin $^{1,\ast}$ , Li Pang $^{1}$ , Chao Pang $^{2}$ , Yupeng Deng $^{3}$ , Jing Yao $^{3}$ , Guisong Xia $^{2}$ , Deyu Meng $^{1}$ , Zhi Wang $^{1}$ , Xiangyong Cao $^{1,\dagger}$ , Xi'an Jiaotong University + +# Abstract + +Remote sensing has become critical for understanding environmental dynamics, urban planning, and disaster management. However, traditional remote sensing workflows often rely on explicit segmentation or detection methods, which struggle to handle complex, implicit queries that require reasoning over spatial context, domain knowledge, and implicit user intent. Motivated by this, we introduce a new task, i.e., geospatial pixel reasoning, which allows implicit querying and reasoning and generates the mask of the target region. To advance this task, we construct and release the first large-scale benchmark dataset called EarthReason, which comprises 5,434 manually annotated image masks with over 30,000 implicit question-answer pairs. Moreover, we propose SegEarth-R1, a simple yet effective language-guided segmentation baseline that integrates a hierarchical visual encoder, a large language model (LLM) for instruction parsing, and a tailored mask generator for spatial correlation. The design of SegEarth-R1 incorporates domain-specific adaptations, including aggressive visual token compression to handle ultra-high-resolution remote sensing images, a description projection module to fuse language and multi-scale features, and a streamlined mask prediction pipeline that directly queries description embeddings. Extensive experiments demonstrate that SegEarth-R1 achieves state-of-the-art performance on both reasoning and referring segmentation tasks, significantly outperforming traditional and LLM-based segmentation methods. Our data and code will be released at https://github.com/earth-insights/SegEarth-R1. + +# 1 Introduction + +Earth observation through remote sensing has emerged as a cornerstone of modern geospatial analysis, enabling unprecedented insights into environmental dynamics, urban planning, and disaster management [56, 45]. Satellite and aerial images provide a unique vantage point for monitoring planetary-scale phenomena, ranging from deforestation patterns to coastal erosion. However, converting this raw pixel data into actionable insights requires more than traditional computer vision techniques; it demands models capable of reasoning about spatial context, domain knowledge, and implicit user intent. 
Conventional remote sensing workflows predominantly rely on explicit tasks, e.g., semantic segmentation and referring segmentation [44, 8, 85], which operate within fixed taxonomies and require precise user instructions. While effective for well-defined scenarios, these approaches struggle to accommodate complex, implicit queries—for example, identifying regions at elevated risk of landslides based on slope, vegetation cover, and proximity to infrastructure. Such tasks limit implicit reasoning over heterogeneous spatial patterns, object relationships, and environmental metadata, exceeding the capabilities of standard segmentation or detection pipelines. + +Motivated by this, we introduce a new task, i.e., geospatial pixel reasoning, which allows implicit querying and reasoning and generates the mask of the target region. To enable research in this task, + +![](images/e85cefccaf9ec3cf9d9cebefeda3c3348534e23088620d9eea2f0a676884b032.jpg) + +![](images/546e8773c1096cb376d642a5780c930b2168aa5bb02a7d60ca5a69f2fe46b2bf.jpg) +A tennis court on the far left +Figure 1: Comparison of semantic segmentation, referring segmentation and geospatial pixel inference. (left) Samples from the LoveDA [67] and RRSIS-D [40] datasets. (right) Samples from the EarthReason dataset. Previous tasks are limited by fixed taxonomies and explicit instructions, while geospatial pixel reasoning supports complex implicit instructions and requires the reasoning capability of the model. + +Geospatial Pixel Reasoning +![](images/e1edabbdc7f67bf6368fb9df675d9fff192ba33bc1b09dd692c8978388be11ca.jpg) +USER: When there is an urgent necessity for emergency medical services, what designated location facilitates rapid air evacuations for patients? +ASSISTANT: Perched atop the medical complex, the airport helipad facilitates swift helicopter evacuations for patients in critical condition. This strategic location offers immediate access to vital healthcare services, ensuring timely assistance in emergencies. + +we build and release the first large-scale benchmark dataset, called EarthReason, which contains 5,434 manually annotated remote sensing image-mask pairs drawn from diverse classification sources, covering 28 scene categories at spatial resolutions ranging from $0.5\mathrm{m}$ to $153\mathrm{m}$ . Each image is paired with multiple implicit reasoning questions that require the model to infer target masks based on contextual and domain-specific knowledge, rather than explicit object names. In addition, by incorporating empty target cases and varying spatial scales, EarthReason pushes models to generalize across complex real-world remote sensing scenarios. + +Recent progress in multimodal large language models (MLLMs) has demonstrated impressive performance in natural image domains, where models like LISA [26] and PixelLM [55] leverage large language models (LLMs) [62, 7, 79] to interpret rich textual prompts and generate pixel-level outputs. These frameworks excel at tasks such as reasoning segmentation [26], where the target mask is not directly specified but must be inferred from nuanced language cues. Unfortunately, directly transferring these methods to geospatial pixel reasoning is non-trivial since remote sensing images present extreme scale variation, densely packed small-scale objects and ultra-high resolution that violate assumptions of natural images. Moreover, different from natural images, remote sensing queries often require spatial correlations. 
For instance, identifying "informal settlements" relies on detecting roof material irregularities, road network fragmentation, and spatial adjacency to legal land-use zones. + +To address these challenges, we present SegEarth-R1, a simple yet effective language-guided segmentation model that integrates a hierarchical visual encoder, an LLM for instruction parsing, and a tailored mask generator designed for spatial correlation. Further, some components are also designed to adapt to the characteristics of remote sensing images. Specifically, we propose the aggressive visual token compression to handle ultra-high-resolution images, a description projection module to fuse language and multi-scale features, and a streamlined mask prediction pipeline that directly queries description embeddings. Despite its architectural simplicity, SegEarth-R1 achieves advanced performance on EarthReason and referring segmentation datasets, significantly outperforming both traditional and LLM-based segmentation methods. + +In summary, our contributions are as follows: + +- We introduce the geospatial pixel reasoning task, which requires models to infer segmentation masks from implicit natural language queries by reasoning over spatial context and domain knowledge. +- We build and release the first large-scale benchmark with 5,434 image-mask pairs, 28 categories, and over 30,000 implicit question-answer pairs, fostering research in geospatial pixel reasoning. + +- We propose an LLM-based segmentation model, SegEarth-R1, which incorporates new segmentation capabilities in remote sensing, containing several domain-specific designs. +- Extensive experiments show that SegEarth-R1 achieves state-of-the-art performance on reasoning and referring segmentation tasks, compared to traditional methods and other LLM-based methods. + +# 2 Related Work + +# 2.1 Referring Segmentation + +Referring segmentation aims to segment targets in an image based on natural language descriptions, requiring precise alignment between linguistic expressions and visual content. Early approaches adopted CNN-RNN/LSTM frameworks [18, 36, 32, 47, 60, 21] to extract visual features and encode textual queries, respectively. However, these methods struggled with complex expressions due to limited local receptive fields and insufficient cross-modal interaction [24]. To address these limitations, attention mechanisms [63] emerged as a pivotal technique [10, 81, 72, 19, 77, 49, 74, 58]. VLT [10] dynamically generates adaptive query vectors based on image-text interactions, enabling precise localization through cross-modal attention. LAVT [81] further advances this paradigm by integrating hierarchical visual-linguistic fusion within a Swin Transformer [43] backbone, where pixel-word attention refines multiscale features to achieve fine-grained semantic alignment. In remote sensing, specifying segmentation for certain instances can improve interpretation efficiency and user interactivity. Recently, Yuan et al. [85] introduced referring segmentation into satellite images for the first time. Subsequently, following the LAVT [81] architecture, RMSIN [40] also incorporated adaptive rotated convolutions to address scale and orientation variations. FIANet [28] and CroBIM [12] introduced elaborate cross-modal interactions for feature alignment. RSSep [17] reformulated referring segmentation as a sequence-to-sequence task, predicting polygonal boundaries to handle scale variations and blurred edges [38]. 
However, existing methods effectively follow explicit instructions for target segmentation but lack implicit intent reasoning. In this paper, the proposed geospatial pixel reasoning task advances beyond referring segmentation by employing LLMs' reasoning capabilities to interpret subtle instructions and accurately segment desired targets. + +# 2.2 LLM-based Segmentation + +Recent advances in LLMs have significantly expanded their capabilities to integrate pixel-level segmentation with language reasoning [76, 68, 73, 2, 61, 88, 84, 16]. For instance, Florence-2 [76] unified text, detection, and segmentation through a sequence-to-sequence framework with task instructions. To address the complexity of real-world segmentation scenarios, some works focus on architectural specialization and instruction-aware adaptation. LISA [26, 80] established the paradigm by introducing a [SEG] token to connect LLMs with segmentation decoders like SAM [25], enabling language-guided mask prediction. Subsequent studies enhanced this paradigm: GSVA [75] introduced shared-weight [SEG] tokens and [REJ] tokens for multi-target and empty-target handling [35, 55, 90], while GLaMM [53] achieved pixel-grounded conversational capabilities through holistic segmentation [94]. Parallel efforts focused on architectural unification - PSALM [92] established a flexible input schema for multi-task segmentation, and OMG-LLaVA [89] combined universal segmentation backbones with LLMs for pixel-level reasoning. Video understanding extensions emerged through VISA [78] and InstructSeg [71], which integrated temporal reasoning. Notably, Text4Seg [27] redefined segmentation as a text generation problem using semantic descriptors, eliminating the need for an additional decoder. In remote sensing, benefiting from the above paradigms [26, 27], some unified models such as RSUniVLM [42], GeoGround [95] and GeoPix [50] are equipped with segmentation capabilities. Although based on LLM, these models focus only on explicit text-guided segmentation. Further, GeoPixel [57] introduced grounded conversation generation [53] to remote sensing, but it still does not provide reasoning capability. Our SegEarth-R1 also follows the LLM-based segmentation paradigm, but is different from previous methods. Specifically, SegEarth-R1 is the first work to support reasoning about the target region from implicit queries, and its components are specifically designed for the challenges in remote sensing. + +Table 1: Comparison between EarthReason and other related datasets. The gray rendering denotes the natural image dataset. "Seg", "Det", "VG", "Cls" denote segmentation, detection, visual grounding and classification datasets, respectively. + +
| Dataset | Mask Label | Reasoning Query | Spatial resolution | Image Size | Image Num | Image Source | Class Num |
| --- | --- | --- | --- | --- | --- | --- | --- |
| ReasonSeg [26] | ✓ | ✓ | - | - | 1,218 | OpenImages (Seg) & ScanNetv2 (Seg) | - |
| LLM-Seg40K [65] | ✓ | ✓ | - | - | 14,000 | LVIS (Seg) & EgoObjects (Seg) | - |
| EarthVQA [66] | ✗ | ✓ | 0.3m | 1024² | 6,000 | LoveDA (Seg) | 14 |
| RefSegRS [85] | ✓ | ✗ | 0.5m-30m | 800² | 4,420 | SkyScapes (Seg) | 14 |
| RRSIS-D [40] | ✓ | ✗ | 0.13m | 512² | 17,402 | RSVGD (VG) & DIOR (OD) | 20 |
| RISBench [12] | ✓ | ✗ | 0.1m-30m | 512² | 52,472 | DOTAv2 (OD) & DIOR (OD) | 26 |
| EarthReason | ✓ | ✓ | 0.5m-153m | 1232-7617² | 5,434 | AID (Cls) & fMoW (Cls) | 28 |
+ +# 3 Benchmark Geospatial Pixel Reasoning Dataset—EarthReason + +# 3.1 Comparison with Related Dataset + +We analyze three types of tasks and datasets related to geospatial pixel reasoning, i.e., natural image reasoning segmentation, remote sensing visual question answering (VQA), and remote sensing referring segmentation, as shown in Table 1. RefSegRS [85] and RRSIS-D [40] provide early benchmarks with image-text-mask triplets. RISBench [12], the largest RRSIS dataset to date, introduced 52,472 triplets with oriented bounding boxes and pixel-level masks generated via a semi-automatic pipeline. These datasets address the limitations of earlier text-focused datasets (e.g., RSICD [46], EarthVQA [66], etc.) and enable comprehensive evaluation of multimodal models. Compared to the previous referring segmentation datasets, our EarthReason datasets has the following features: (1) The mask labels in EarthReason are not explicitly specified by the query, but require further reasoning to determine the target, which challenges the model's reasoning ability. (2) EarthReason uses a more raw data source. The previous related datasets directly transform existing segmentation datasets [1, 67] or SAM-processed detection datasets [86, 31, 11], while our EarthReason uses images from classification datasets [44, 8] and we manually annotate them. This allows EarthReason to provide more data gain when it comes to co-training of unified segmentation tasks. (3) EarthReason has more diverse spatial resolutions and image sizes, which are conducive to solving the object scale spanning problem inherent in remote sensing images [56]. Compared to the first natural image reasoning segmentation dataset, ReasonSeg, EarthReason contains $4.46 \times$ more data than it. Therefore, we believe that EarthReason, as the first geospatial pixel reasoning dataset in the remote sensing area, is capable of performing initial explorations of this task. + +# 3.2 Dataset Generation Pipeline + +Our benchmark dataset EarthReason is generated according to the following three steps, i.e., image collection, question-answer pair generation, and object mask labeling. + +Image Collection. As mentioned above, to avoid potential data leakage in the future construction of unified segmentation models for remote sensing, we collect images from existing classification data. Although this increases the annotation cost, it also motivates more diverse scenes. Specifically, we first select the 28 categories that are more suitable for reasoning in the Million-AID [44] dataset, and sample about 200 images for each category. Then, we find that the actual geographic range contained in Million-AID's images is limited. Thus, we also collect 800 images in the fMoW [8] dataset to enhance the model's reasoning ability in complex scenes. Further, to alleviate the factitious illusion issue [51], we add an extra 200 empty target images (i.e., the implied target is not in the image). Finally, some low-quality images are eliminated, and we obtain a total of 5,434 images. + +Question-Answer Pair Generation. We use GPT-4o1 to construct question-answer pairs, and given its excellent visual comprehension, we take the remote sensing image and the corresponding scene category (provided by Million-AID and fMoW) as part of the prompt to generate questions and answers that are closely related to the image. An example of such a prompt is illustrated in Appendix A.1. 
In addition, following [26], to make the questions and answers diverse, we adapt GPT-3.5 to rephrase the instructional questions and answers, as shown in Appendix Figure 7. + +![](images/f68c75612d877479ee4e3befd34d3f1b245924c81902e10e0783026023a4668c.jpg) +Figure 2: Overview of the proposed SegEarth-R1 architecture. Given an image $X_{v}$ and a text description $X_{q}$ , a hierarchical visual encoder and a proposed connector are used to extract and compress visual tokens. Then, the visual tokens $\square$ and description embeddings $\square$ are fed into an LLM for instruction interpretation and semantic correlation. Finally, description embeddings are directly mapped to the query vector and used for spatial correlation and segmentation mask generation. + +Object Mask Labeling. Different from previous referring and reasoning segmentation datasets (which use off-the-shelf masks or bounding boxes), we annotate images from scratch. Specifically, we employ multiple experts in remote sensing and vision, assign each expert a few hundred images to annotate, and cross-validate the annotations after they are completed. For simple targets (e.g., lake), SAM-H [25] is used to assist in annotation; for complex targets (e.g., wind turbine), each point of the polygon is finely marked. A description of mask quality is provided in Appendix A.1. + +Dataset Statistics. The EarthReason dataset is partitioned into training, validation, and testing sets, comprising 2,371, 1,135, and 1,928 images, respectively. In the training set, each image is annotated with an average of six questions and three corresponding answers. The average question length is 20.86 words, while the average answer length is 26.76 words. To assess the model's generalization capability, several semantic categories are deliberately reserved for the validation and test sets, ensuring they remain unseen during training. Additional dataset details are provided in the Appendix A.2. + +# 4 Baseline Geospatial Pixel Reasoning Method—SegEarth-R1 + +Compared with natural images, remote sensing images exhibit distinctive characteristics that demand specialized architectural designs for pixel-wise geospatial reasoning. In this work, we propose SegEarth-R1, a simple yet powerful baseline for geospatial pixel reasoning that effectively harnesses LLM capabilities while incorporating domain-specific adaptations. As illustrated in Figure 2, our architecture comprises three core parts: A visual encoder for image feature extraction, an LLM for instruction interpretation and semantic correlation, and a mask generator for spatial correlation and mask prediction. Each part incorporates critical design considerations to address the unique challenges of remote sensing images. + +# 4.1 Hierarchical Visual Encoder + +Satellite and aerial targets present two critical challenges: (1) extreme scale variations ranging from sub-meter objects to kilometer-scale geographical formations [56], and (2) densely distributed small objects requiring high-resolution analysis [30]. Conventional ViT-based encoders adopted in MLLMs [26, 80, 25, 75] (e.g., image encoder in CLIP [52] and SAM [25, 54]) prove suboptimal due to their fixed-scale feature extraction and information compression through aggressive patch merging. To alleviate these limitations, following [92], SegEarth-R1 employs a Swin Transformer [43] backbone enhanced with progressive feature hierarchy construction. 
This architecture generates multi-scale feature maps $v_{h}, h \in [1,4]$ at $1/4, 1/8, 1/16, 1/32$ of the original resolution through controlled downsampling operations, preserving high-resolution details for small objects while capturing contextual semantics at deeper layers. + +# 4.2 Large Language Model and Input Schema + +SegEarth-R1 adopts the MLLM paradigm [37, 29] by jointly embedding visual tokens and textual instructions into a unified LLM input space for multimodal reasoning. Unlike natural images, remote sensing data exhibits ultra-high-resolution coverage [23, 64], posing computational challenges when processed through billion-level LLMs. Therefore, we expect to compress the visual token to alleviate the computational cost and make only simple semantic correlations in LLM. + +# 4.2.1 Visual Token + +Redundancy Analysis. Image redundancy quantifies the proportion of compressible, non-informative data within an image. To investigate the feasibility of aggressive visual token compression for remote sensing images, we conduct a redundancy analysis from dual perspectives: pixel-level statistical redundancy and spatial structural redundancy. + +- According to information theory [59], entropy measures the average uncertainty or information content of an image, while the maximum entropy corresponds to the idealized scenario where pixel values are uniformly distributed (i.e., no redundancy). Thus, from the entropy perspective, the image redundancy can be defined as [14]: + +$$ +R _ {e} = 1 - \frac {- \sum_ {l = 0} ^ {L - 1} p (l) \log_ {2} p (l)}{\log_ {2} L}, \tag {1} +$$ + +where $L$ denotes the number of distinct intensity levels (e.g., $L = 256$ for an 8-bit grayscale image), and $p(l)$ denotes the probability mass function of the pixel intensity value $l$ . + +- Beyond pixel-level statistical redundancy, structural self-similarity reflects spatial redundancy caused by repetitive patterns (e.g., textures, geometric features). To quantify this, we leverage the Structural Similarity Index Matrix (SSIM) [70] to measure inter-patch similarity. For an image partitioned into $N$ patches, the SSIM matrix $\mathbf{M} \in \mathbb{R}^{N \times N}$ is defined as: + +$$ +\mathbf {M} (i, j) = \frac {(2 \mu_ {i} \mu_ {j} + C _ {1}) (2 \sigma_ {i j} + C _ {2})}{(\mu_ {i} ^ {2} + \mu_ {j} ^ {2} + C _ {1}) (\sigma_ {i} ^ {2} + \sigma_ {j} ^ {2} + C _ {2})}, \quad \forall i, j \in 1, \dots , N \tag {2} +$$ + +where $\mu_{i},\sigma_{i}$ denote the mean and variance of the $i$ -th patch, $\sigma_{ij}$ is the covariance between patches $i$ and $j$ , and $C_1,C_2$ are stability constants. Then, the structural self-similarity redundancy $R_{s}$ is derived by averaging off-diagonal elements of $\mathbf{M}$ : + +$$ +R _ {s} = \frac {1}{N (N - 1)} \sum_ {i \neq j} \mathbf {M} (i, j). \tag {3} +$$ + +We evaluate six benchmark datasets spanning natural images (COCO [3], ADE20K [93], PASCAL [13]) and remote sensing images (LoveDA [67], DeepGlobe [9], xBD [15]) for redundancy analysis. 
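The two redundancy measures above are straightforward to reproduce from Eqs. (1)-(3). The NumPy sketch below is only illustrative: the 16-pixel patch size, the SSIM stability constants, and the assumption of 8-bit grayscale inputs are our choices, not settings reported by the authors.

```python
import numpy as np

def entropy_redundancy(gray: np.ndarray, levels: int = 256) -> float:
    """Eq. (1): R_e = 1 - H(image) / log2(L), assuming an 8-bit grayscale image."""
    hist = np.bincount(gray.ravel(), minlength=levels).astype(np.float64)
    p = hist / hist.sum()
    p = p[p > 0]                              # convention: 0 * log 0 = 0
    entropy = -(p * np.log2(p)).sum()
    return 1.0 - entropy / np.log2(levels)

def ssim_redundancy(gray: np.ndarray, patch: int = 16,
                    c1: float = 6.5025, c2: float = 58.5225) -> float:
    """Eqs. (2)-(3): mean off-diagonal SSIM over non-overlapping patches."""
    h, w = gray.shape
    g = gray[:h - h % patch, :w - w % patch].astype(np.float64)
    # Split into N non-overlapping patches and flatten each to a vector.
    blocks = (g.reshape(g.shape[0] // patch, patch, -1, patch)
               .swapaxes(1, 2).reshape(-1, patch * patch))
    mu = blocks.mean(axis=1)
    var = blocks.var(axis=1)
    centered = blocks - mu[:, None]
    cov = centered @ centered.T / blocks.shape[1]          # sigma_ij for all patch pairs
    num = (2.0 * np.outer(mu, mu) + c1) * (2.0 * cov + c2)
    den = (mu[:, None] ** 2 + mu[None, :] ** 2 + c1) * (var[:, None] + var[None, :] + c2)
    m = num / den                                          # SSIM matrix M
    n = m.shape[0]
    return float((m.sum() - np.trace(m)) / (n * (n - 1)))  # Eq. (3): off-diagonal mean

# Higher values mean more compressible (more redundant) content.
img = (np.random.rand(512, 512) * 255).astype(np.uint8)
print(entropy_redundancy(img), ssim_redundancy(img))
```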
As shown in Figure 3, our analysis reveals two critical findings: 1) Remote sensing images demonstrate $1.9\sim 3.3\times$ higher entropic redundancy than natural images, indicating greater pixel-level compressibility. 2) The average self-similarity for remote sensing data exceeds that of natural images by $42.6\%$, confirming the higher prevalence of repetitive textures and geometric patterns. This insight justifies aggressive token compression for semantic-level comprehension in remote sensing images.

![](images/83befbd5344d600c990778748b796f239ff6028bc3f8f1dd0071afa497f815a3.jpg)
(a) pixel-level redundancy

![](images/fb419c92310842f501e960cafb412095b70d4f48b6db1426292df9d680961416.jpg)
(b) spatial structure redundancy

Figure 3: Redundancy analysis of remote sensing datasets and natural images, and the former exhibits higher redundancy.

Token Compression Connector. In modern MLLMs, connectors such as Q-Former [29] and MLP [37] are designed to transform visual tokens into a multi-modal space. However, some works [4, 82] point out that Q-Former may lead to loss of visual information and is difficult to train. Therefore, in SegEarth-R1, we follow the MLP connector fashion of LLaVA [37] and use a simple but effective connector, i.e., stacked convolutional blocks and Layer Normalization (LN). Here, convolutional blocks are used for spatial down-sampling to compress the size of the feature map, and LN is used to stabilize cross-modal training. Specifically, our connector can be formulated as:

$$
v_{out} = \left(\mathrm{Conv} \circ \mathrm{LN}\right)^{d}\left(v_{4}\right), \tag{4}
$$

where $\circ$ denotes the function composition operator, and $d$ denotes the number of stacked layers.

# 4.2.2 Text Instruction

Although the instructions involved in geospatial pixel reasoning are implicit and contain more words than those of referring segmentation, they still maintain the same data format. Therefore, it is easy to convert them into question-answer pairs using a template like "USER: This is an image [image tokens], please do geospatial pixel reasoning according to the following instruction: [instruction]. ASSISTANT: [answer]".

We conduct a comparative evaluation of SOTA LLM-based methods and SegEarth-R1 on the EarthReason dataset. As shown in Table 2, all models are trained solely on the training split of EarthReason to ensure a fair comparison.

Table 2: Geospatial pixel reasoning results of SegEarth-R1 and SOTA LLM-based methods on the EarthReason dataset.

| Method | Visual Encoder | LLM Type | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test) |
| --- | --- | --- | --- | --- | --- | --- |
| LISA [26] | CLIP-L | Vicuna-7B [7] | 57.39 | 59.10 | 61.04 | 60.88 |
| PixelLM [55] | CLIP-L | Vicuna-7B [7] | 57.79 | 59.22 | 57.94 | 60.01 |
| PSALM [92] | Swin-B | phi-1.5 (1.3B) [33] | 62.03 | 64.61 | 66.61 | 68.30 |
| SegEarth-R1 | Swin-B | phi-1.5 (1.3B) [33] | 64.13 | 68.25 | 68.60 | 70.75 |

LISA and PixelLM demonstrate comparable performance; however, despite leveraging larger LLMs or MLLMs, the quality of their predicted segmentation masks remains suboptimal. This can be primarily attributed to their reliance on CLIP as the visual encoder, which tends to diminish the representation of small-scale geospatial targets. As one of the baselines of SegEarth-R1, PSALM achieves notable improvements over LISA and PixelLM. Nevertheless, PSALM does not adequately incorporate LLM-based segmentation and the Mask2Former paradigm, and lacks considerations for overhead images. SegEarth-R1 achieves the best results on both metrics, surpassing PSALM by $3.64\%$ and $2.45\%$ on the test set. Importantly, SegEarth-R1 uses fewer visual tokens in the LLM and reduces the number of queries in the mask generator, thus providing a lower inference cost.
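For reference, the cIoU and gIoU values reported in Table 2 (and in the referring segmentation tables that follow) are commonly defined in this line of work as the cumulative intersection over cumulative union and the mean of per-image IoUs, respectively, while P@X is the fraction of samples whose IoU exceeds X. The sketch below illustrates these conventions and is not the authors' evaluation code; the handling of empty-target samples shown here is one common choice.

```python
import numpy as np

def evaluate_masks(preds, gts, thresholds=(0.5, 0.6, 0.7, 0.8, 0.9)):
    """Compute gIoU, cIoU and P@X for paired boolean masks of identical shape."""
    per_image_iou, inter_sum, union_sum = [], 0, 0
    for pred, gt in zip(preds, gts):
        inter = np.logical_and(pred, gt).sum()
        union = np.logical_or(pred, gt).sum()
        # One common empty-target convention: both masks empty counts as IoU 1.
        per_image_iou.append(1.0 if union == 0 else inter / union)
        inter_sum += inter
        union_sum += union
    metrics = {
        "gIoU": float(np.mean(per_image_iou)),                        # mean per-image IoU
        "cIoU": float(inter_sum / union_sum) if union_sum else 1.0,   # cumulative IoU
    }
    for t in thresholds:
        metrics[f"P@{t}"] = float(np.mean([iou > t for iou in per_image_iou]))
    return metrics

# Example with two toy 4x4 masks; perfect predictions give gIoU = cIoU = P@X = 1.0.
pred = [np.eye(4, dtype=bool), np.zeros((4, 4), dtype=bool)]
gt = [np.eye(4, dtype=bool), np.zeros((4, 4), dtype=bool)]
print(evaluate_masks(pred, gt))
```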
+ +# 5.3 Referring Segmentation Results + +SegEarth-R1 also supports basic explicit language-guided segmentation. As shown in Table 3, we compare its performance with existing SOTA traditional methods (not based on LLM) as well as recent LLM-based methods. Notably, prior to SegEarth-R1, LLM-based methods consistently underperformed in comparison to traditional methods on the referring segmentation task. For instance, the advanced GeoGround [95] lags behind RMSIN [40] by $3.7\%$ in terms of gIoU on the RRSIS-D dataset. In contrast, SegEarth-R1, as a universal LLM-based language-guided segmentation method, surpasses traditional methods on the referring segmentation task for the first time with a $2.2\%$ improvement. This result highlights the enhanced general + +Table 3: Referring segmentation results among SegEarth-R1 and previous related works on RRSIS-D dataset. + +
| Method | Venue | P@0.5 (Val) | P@0.5 (Test) | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| *Traditional method:* | | | | | | | |
| RRN [32] | CVPR'18 | 51.09 | 51.07 | 66.53 | 66.43 | 46.06 | 45.64 |
| CMSA [83] | CVPR'19 | 55.68 | 55.32 | 69.39 | 69.39 | 48.85 | 48.54 |
| LSCM [22] | ECCV'20 | 57.12 | 56.02 | 69.05 | 69.28 | 50.36 | 49.92 |
| CMPC [21] | CVPR'20 | 57.93 | 55.83 | 69.22 | 69.39 | 50.41 | 49.24 |
| BRINet [20] | CVPR'20 | 58.79 | 56.90 | 70.73 | 69.88 | 51.14 | 49.65 |
| CMPC+ [39] | TPAMI'20 | 59.19 | 57.65 | 70.14 | 68.64 | 51.41 | 50.24 |
| LGCE [85] | TGRS'24 | 68.10 | 67.65 | 76.68 | 76.34 | 60.16 | 59.37 |
| RIS-DMMI [19] | CVPR'23 | 70.40 | 68.74 | 77.01 | 76.20 | 60.72 | 60.12 |
| LAVT [81] | CVPR'22 | 69.54 | 69.52 | 77.59 | 77.19 | 61.46 | 61.04 |
| RMSIN [40] | CVPR'24 | 74.66 | 74.26 | 78.27 | 77.79 | 65.10 | 64.20 |
| *LLM-based method:* | | | | | | | |
| LISA [26] | CVPR'24 | 27.07 | 24.51 | - | - | 27.84 | 26.78 |
| PixelLM [55] | CVPR'24 | 33.46 | 28.81 | - | - | 33.89 | 31.65 |
| NEXT-Chat [87] | arXiv'23 | 28.97 | 26.37 | - | - | 26.98 | 24.98 |
| GeoGround [95] | arXiv'25 | 68.69 | 67.50 | - | - | 61.10 | 60.50 |
| SegEarth-R1 | | 78.62 | 76.96 | 78.92 | 78.01 | 67.56 | 66.40 |
+ +ization capability and practical potential of SegEarth-R1. On the RefSegRS dataset, the improvement of SegEarth-R1 is more significant than the previous method, with an $8.33\%$ and $9.87\%$ improvement over RMSIN on the validation and testing sets, respectively, as listed in Table 4. + +Table 4: Referring segmentation results among SegEarth-R1 and previous related works on RefSegRS dataset. + +
| Method | Venue | P@0.5 (Val) | P@0.5 (Test) | P@0.6 (Val) | P@0.6 (Test) | P@0.7 (Val) | P@0.7 (Test) | P@0.8 (Val) | P@0.8 (Test) | P@0.9 (Val) | P@0.9 (Test) | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| BRINet [20] | CVPR'20 | 36.86 | 20.72 | 35.53 | 14.26 | 19.93 | 9.87 | 10.66 | 2.98 | 2.84 | 1.14 | 61.59 | 58.22 | 38.73 | 31.51 |
| LSCM [22] | ECCV'20 | 56.82 | 31.54 | 41.24 | 20.41 | 21.85 | 9.51 | 12.11 | 5.29 | 2.51 | 0.84 | 62.82 | 61.27 | 40.59 | 35.54 |
| CMPC [21] | CVPR'20 | 46.09 | 32.36 | 26.45 | 14.14 | 12.76 | 6.55 | 7.42 | 1.76 | 1.39 | 0.22 | 63.55 | 55.39 | 42.08 | 40.63 |
| CMSA [83] | CVPR'19 | 39.24 | 28.07 | 38.44 | 20.25 | 20.39 | 12.71 | 11.79 | 5.61 | 1.52 | 0.83 | 65.84 | 64.53 | 43.62 | 41.47 |
| RRN [32] | CVPR'18 | 55.43 | 30.26 | 42.98 | 23.01 | 23.11 | 14.87 | 13.72 | 7.17 | 2.64 | 0.98 | 69.24 | 65.06 | 50.81 | 41.88 |
| EVF-SAM [91] | arXiv'24 | 57.77 | 35.17 | 37.59 | 22.34 | 16.24 | 9.36 | 4.87 | 2.86 | 1.86 | 0.39 | 59.61 | 55.51 | 46.98 | 36.64 |
| CMPC+ [39] | TPAMI'21 | 56.84 | 49.19 | 37.59 | 28.31 | 20.42 | 15.31 | 10.67 | 8.12 | 2.78 | 0.55 | 70.62 | 66.53 | 47.13 | 43.65 |
| CARIS [41] | ACMMM'23 | 68.45 | 45.40 | 47.10 | 27.19 | 25.52 | 15.08 | 14.62 | 8.87 | 3.71 | 1.98 | 75.79 | 69.74 | 54.30 | 42.66 |
| CRIS [69] | CVPR'22 | 53.13 | 35.77 | 36.19 | 24.11 | 24.36 | 14.36 | 11.83 | 6.38 | 2.55 | 1.21 | 72.14 | 65.87 | 53.74 | 43.26 |
| LAVT [81] | CVPR'22 | 80.97 | 51.84 | 58.70 | 30.27 | 31.09 | 17.34 | 15.55 | 9.52 | 4.64 | 2.09 | 78.50 | 71.86 | 61.53 | 47.40 |
| RIS-DMMI [19] | CVPR'23 | 86.17 | 63.89 | 74.71 | 44.30 | 38.05 | 19.81 | 18.10 | 6.49 | 3.25 | 1.00 | 74.02 | 68.58 | 65.72 | 52.15 |
| LGCE [85] | TGRS'24 | 90.72 | 73.75 | 86.31 | 61.14 | 71.93 | 39.46 | 32.95 | 16.02 | 10.21 | 5.45 | 83.56 | 76.81 | 72.51 | 59.96 |
| RMSIN [40] | CVPR'24 | 93.97 | 79.20 | 89.33 | 65.99 | 74.25 | 42.98 | 29.70 | 16.51 | 7.89 | 3.25 | 82.41 | 75.72 | 73.84 | 62.58 |
| SegEarth-R1 | | 95.82 | 86.30 | 93.27 | 79.53 | 88.86 | 69.57 | 78.19 | 48.87 | 22.04 | 10.73 | 85.01 | 79.00 | 82.17 | 72.45 |
+ +Figure 5: Qualitative Results of SegEarth-R1 on EarthReason. More results can be found in Appendix C. +![](images/97c77ed6388f2c870fddee56db8381e6bba913581803ca912ab29deb5ea07b5b.jpg) +USER: In this region, what infrastructure changes could best support an extended growing season for local agricultural crops? ASISTANT: The greenhouse creates a controlled environment that shields plants from harsh weather, harmful insects, and infectious diseases, all of which can threaten the health of the plant. + +![](images/bb2e1961765507e81bf0f6b605b6e42bb453f5cff362f055ab7b4b35d26381c2.jpg) +USER: Which types of facilities would aid small boat docking in this coastal area, while also contributing positively to tourism and local fisheries? +ASSISTANT: A pier is a man-made construction that extends over the water's surface, acting as a convenient spot for boats to moor while also providing easy access for passengers to get on and off. It serves as a vital hub for the maritime industry, ensuring smooth operations and timely delivery of products. + +![](images/64c75a815a92b0c337ccdef9e0c770c2624c1b957678d0c0f48409b52639869c.jpg) +USER: What locations facilitate the takeoff and landing of aircraft under supervision from air traffic controllers in a regulated setting? +ASSISTANT: The airport runway is meticulously crafted to facilitate the safe ascent and descent of aircraft, ensuring that every flight begins and ends with precision. + +# 5.4 Ablation Study + +Table 5: Ablation of SegEarth-R1 components on EarthReason: query description embedding (Query D.E.), description projector $(D$ -Projectile), token compression connector (T.C. Connector). + +
| Query D.E. | D-Projector | T.C. Connector | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test) |
| --- | --- | --- | --- | --- | --- | --- |
| ✗ | ✗ | ✗ | 62.03 | 64.61 | 66.61 | 68.30 |
| ✓ | ✗ | ✗ | 63.34 | 66.19 | 67.42 | 69.15 |
| ✗ | ✓ | ✗ | 63.32 | 66.31 | 67.22 | 69.21 |
| ✗ | ✗ | ✓ | 63.47 | 65.41 | 68.31 | 69.20 |
| ✓ | ✓ | ✗ | 64.12 | 66.71 | 68.61 | 69.61 |
| ✓ | ✓ | ✓ | 64.13 | 68.25 | 68.60 | 70.75 |
+ +Table 6: Ablation of LLM type on RRSIS-D. + +
| LLM Type | cIoU (Val) | cIoU (Test) | gIoU (Val) | gIoU (Test) |
| --- | --- | --- | --- | --- |
| phi-1.5 (1.3B) | 78.92 | 78.01 | 67.56 | 66.40 |
| phi-2 (2B) | 78.98 | 78.35 | 67.91 | 66.67 |
| Qwen2.5 (0.5B) | 78.53 | 77.87 | 67.70 | 66.49 |
+ +Table 7: Ablation of $d$ on EarthReason Val set. + +
| $d$ | #Visual Token | gIoU |
| --- | --- | --- |
| 0 | 1024 | 68.28 |
| 1 | 256 | 68.47 |
| 2 | 64 | 68.60 |
| 3 | 16 | 68.22 |
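Table 7 varies the number of stacked layers $d$ in the connector of Eq. (4): each Conv+LN block halves both spatial dimensions of $v_4$, so the visual token count drops from 1024 to 256, 64, and 16. The PyTorch sketch below shows one way such a connector could be written; the kernel size, channel width, GroupNorm stand-in for LN, and the final projection to the LLM width are illustrative assumptions, not the paper's exact configuration.

```python
import torch
import torch.nn as nn

class TokenCompressionConnector(nn.Module):
    """Sketch of Eq. (4): v_out = (Conv o LN)^d (v_4), followed by a projection into
    the LLM embedding space. Assumed input: the 1/32-scale feature map v_4 with shape
    (B, C, 32, 32) for a 1024x1024 image, i.e. 1024 visual tokens at d = 0."""

    def __init__(self, in_dim: int = 256, llm_dim: int = 2048, d: int = 2):
        super().__init__()
        blocks = []
        for _ in range(d):
            blocks += [
                nn.GroupNorm(1, in_dim),                                         # stand-in for LN
                nn.Conv2d(in_dim, in_dim, kernel_size=3, stride=2, padding=1),   # halves H and W
            ]
        self.compress = nn.Sequential(*blocks)
        self.proj = nn.Linear(in_dim, llm_dim)        # map to the assumed LLM width

    def forward(self, v4: torch.Tensor) -> torch.Tensor:
        x = self.compress(v4)                         # (B, C, H / 2^d, W / 2^d)
        tokens = x.flatten(2).transpose(1, 2)         # (B, N, C) with N = H * W / 4^d
        return self.proj(tokens)

# d = 2 compresses a 32x32 token grid (1024 tokens) to 8x8 (64 tokens), as in Table 7.
v4 = torch.randn(1, 256, 32, 32)
print(TokenCompressionConnector(d=2)(v4).shape)       # torch.Size([1, 64, 2048])
```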
+ +Components. We conduct ablation studies on the EarthReason dataset to evaluate the effectiveness of the novel components involved in SegEarth-R1. As listed in Table 5, the first row shows the results of the PSALM baseline. Each proposed component contributes to performance enhancement, yielding improvements ranging from $0.85\%$ to $0.9\%$ . The T.C. Connector and Query D.E. not only enhances performance but also reduces computational overhead. Further, the proposed components can be well coupled, and when they are all activated, i.e., complete SegEarth-R1, all metrics exhibit substantial gains over the baseline, confirming the effectiveness and compatibility of the proposed design. In fact, although these components are initially designed with remote sensing scenarios in mind, their underlying principles offer transferable insights applicable to general image understanding. + +LLM Type. Given the limited scale of the dataset, we select some small LLM for comparison, as presented in Table 6. SegEarth-R1 demonstrates consistently high performance across different LLM, indicating the robustness and architectural stability of the overall framework. Notably, with Qwen2.5 (0.5B) [79], it still achieves competitive results, indicating its potential for edge deployment. + +Layer Number of T.C. Connector. The layer number $d$ controls the number of visual tokens fed into the LLM. As shown in Table 7, increasing token quantity does not improve performance. This observation aligns with our earlier analysis, suggesting that appropriate compression of visual tokens is beneficial for the global understanding of a remote sensing image. In SegEarth-R1, spatial correlations between the image and the instruction are primarily handled by the mask generator, while the LLM is only responsible for relatively semantic correlations. This division of labor allows for more efficient use of computational resources without compromising performance. + +# 6 Conclusion + +In this paper, we introduce geospatial pixel reasoning, a new task in remote sensing that requires models to infer segmentation masks from implicit natural language queries by reasoning over spatial context and domain knowledge. To enable research in this direction, we present EarthReason, the first large-scale benchmark dataset that emphasises complex reasoning scenarios. To address the distinct challenges inherent in remote sensing, we propose SegEarth-R1, a language-guided segmentation model that integrates a hierarchical visual encoder, an LLM for instruction parsing and semantic correlation, and a tailored mask generator designed for spatial correlation. Extensive experiments validate SegEarth-R1's superiority, achieving SOTA performance on both geospatial pixel reasoning and referring segmentation tasks. This work pioneers the fusion of natural language reasoning with pixel-level geospatial analysis, offering transformative potential for applications like environmental monitoring and disaster response. + +# References + +[1] Seyed Majid Azimi, Corentin Henry, Lars Sommer, Arne Schumann, and Eleonora Vig. Skyscapes fine-grained semantic understanding of aerial scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7393-7403, 2019. +[2] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024. 
+[3] Holger Caesar, Jasper Uijlings, and Vittorio Ferrari. Coco-stuff: Thing and stuff classes in context. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1209-1218, 2018. +[4] Junbum Cha, Wooyoung Kang, Jonghwan Mun, and Byungseok Roh. Honeybee: Locality-enhanced projector for multimodal llm. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13817-13827, 2024. +[5] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1290–1299, 2022. +[6] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-pixel classification is not all you need for semantic segmentation. Advances in neural information processing systems, 34:17864-17875, 2021. +[7] Wei-Lin Chiang, Zhuohan Li, Ziqing Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with $90\%$ * chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023. +[8] Gordon Christie, Neil Fendley, James Wilson, and Ryan Mukherjee. Functional map of the world. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6172-6180, 2018. +[9] Ilke Demir, Krzysztof Koperski, David Lindenbaum, Guan Pang, Jing Huang, Saikat Basu, Forest Hughes, Devis Tuia, and Ramesh Raskar. Deep globe 2018: A challenge to parse the earth through satellite images. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 172-181, 2018. +[10] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16321-16330, 2021. +[11] Jian Ding, Nan Xue, Gui-Song Xia, Xiang Bai, Wen Yang, Michael Ying Yang, Serge Belongie, Jiebo Luo, Mihai Datcu, Marcello Pelillo, et al. Object detection in aerial images: A large-scale benchmark and challenges. IEEE transactions on pattern analysis and machine intelligence, 44(11):7778-7796, 2021. + +[12] Zhe Dong, Yuzhe Sun, Yanfeng Gu, and Tianzhu Liu. Cross-modal bidirectional interaction model for referring remote sensing image segmentation. arXiv preprint arXiv:2410.08613, 2024. +[13] Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes (voc) challenge. International journal of computer vision, 88:303-338, 2010. +[14] Rafael C Gonzales and Paul Wintz. Digital image processing. Addison-Wesley Longman Publishing Co., Inc., 1987. +[15] Ritwik Gupta, Richard Hosfelt, Sandra Sajeev, Nirav Patel, Bryce Goodman, Jigar Doshi, Eric Heim, Howie Choset, and Matthew Gaston. xbd: A dataset for assessing building damage from satellite imagery. arXiv preprint arXiv:1911.09296, 2019. +[16] Junwen He, Yifan Wang, Lijun Wang, Huchuan Lu, Jun-Yan He, Jin-Peng Lan, Bin Luo, and Xuansong Xie. Multi-modal instruction tuned llms with fine-grained visual perception. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 13980-13990, 2024. +[17] Ngoc-Vuong Ho, Thinh Phan, Meredith Adkins, Chase Rainwater, Jackson Cothren, and Ngan Le. Rssep: Sequence-to-sequence model for simultaneous referring remote sensing segmentation and detection. 
In Proceedings of the Asian Conference on Computer Vision, pages 218-231, 2024. +[18] Ronghang Hu, Marcus Rohrbach, and Trevor Darrell. Segmentation from natural language expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part I 14, pages 108-124. Springer, 2016. +[19] Yutao Hu, Qixiong Wang, Wenqi Shao, Enze Xie, Zhenguo Li, Jungong Han, and Ping Luo. Beyond one-to-one: Rethinking the referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4067-4077, 2023. +[20] Zhiwei Hu, Guang Feng, Jiayu Sun, Lihe Zhang, and Huchuan Lu. Bi-directional relationship inferring network for referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4424-4433, 2020. +[21] Shaofei Huang, Tianrui Hui, Si Liu, Guanbin Li, Yunchao Wei, Jizhong Han, Luoqi Liu, and Bo Li. Referring image segmentation via cross-modal progressive comprehension. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10488-10497, 2020. +[22] Tianrui Hui, Si Liu, Shaofei Huang, Guanbin Li, Sansi Yu, Faxi Zhang, and Jizhong Han. Linguistic structure guided context modeling for referring image segmentation. In European Conference on Computer Vision, pages 59-75. Springer, 2020. +[23] Deyi Ji, Feng Zhao, Hongtao Lu, Mingyuan Tao, and Jieping Ye. Ultra-high resolution segmentation with ultra-rich context: A novel benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23621-23630, 2023. +[24] Lixia Ji, Yunlong Du, Yiping Dang, Wenzhao Gao, and Han Zhang. A survey of methods for addressing the challenges of referring image segmentation. Neurocomputing, 583:127599, 2024. +[25] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023. +[26] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9579-9589, 2024. + +[27] Mengcheng Lan, Chaofeng Chen, Yue Zhou, Jiaxing Xu, Yiping Ke, Xinjiang Wang, Litong Feng, and Wayne Zhang. Text4seg: Reimagining image segmentation as text generation. arXiv preprint arXiv:2410.09855, 2024. +[28] Sen Lei, Xinyu Xiao, Tianlin Zhang, Heng-Chao Li, Zhenwei Shi, and Qing Zhu. Exploring fine-grained image-text alignment for referring remote sensing image segmentation. IEEE Transactions on Geoscience and Remote Sensing, 2024. +[29] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pages 19730–19742. PMLR, 2023. +[30] Kaiyu Li, Ruixun Liu, Xiangyong Cao, Xueru Bai, Feng Zhou, Deyu Meng, and Zhi Wang. Seearth-ov: Towards training-free open-vocabulary segmentation for remote sensing images. arXiv preprint arXiv:2410.01768, 2024. +[31] Ke Li, Gang Wan, Gong Cheng, Liqui Meng, and Junwei Han. Object detection in optical remote sensing images: A survey and a new benchmark. ISPRS journal of photogrammetry and remote sensing, 159:296-307, 2020. 
+[32] Ruiyu Li, Kaican Li, Yi-Chun Kuo, Michelle Shu, Xiaojuan Qi, Xiaoyong Shen, and Jiaya Jia. Referring image segmentation via recurrent refinement networks. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5745-5753, 2018. +[33] Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar, and Yin Tat Lee. Textbooks are all you need ii: phi-1.5 technical report. arXiv preprint arXiv:2309.05463, 2023. +[34] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dólar. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017. +[35] Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23592-23601, 2023. +[36] Chenxi Liu, Zhe Lin, Xiaohui Shen, Jimei Yang, Xin Lu, and Alan Yuille. Recurrent multimodal interaction for referring image segmentation. In Proceedings of the IEEE international conference on computer vision, pages 1271-1280, 2017. +[37] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023. +[38] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. Polyformer: Referring image segmentation as sequential polygon generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18653-18663, 2023. +[39] Si Liu, Tianrui Hui, Shaofei Huang, Yunchao Wei, Bo Li, and Guanbin Li. Cross-modal progressive comprehension for referring segmentation. IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(9):4761-4775, 2021. +[40] Sihan Liu, Yiwei Ma, Xiaqing Zhang, Haowei Wang, Jiayi Ji, Xiaoshuai Sun, and Rongrong Ji. Rotated multi-scale interaction network for referring remote sensing image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26658-26668, 2024. +[41] Sun-Ao Liu, Yiheng Zhang, Zhaofan Qiu, Hongtao Xie, Yongdong Zhang, and Ting Yao. Caris: Context-aware referring image segmentation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 779-788, 2023. +[42] Xu Liu and Zhouhui Lian. Rsunivlm: A unified vision language model for remote sensing via granularity-oriented mixture of experts. arXiv preprint arXiv:2412.05679, 2024. + +[43] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF international conference on computer vision, pages 10012-10022, 2021. +[44] Yang Long, Gui-Song Xia, Shengyang Li, Wen Yang, Michael Ying Yang, Xiao Xiang Zhu, Liangpei Zhang, and Deren Li. On creating benchmark dataset for aerial image interpretation: Reviews, guidances, and million-aid. IEEE Journal of selected topics in applied earth observations and remote sensing, 14:4205–4230, 2021. +[45] Siqi Lu, Junlin Guo, James R Zimmer-Dauphinee, Jordan M Nieusma, Xiao Wang, Steven A Wernke, Yuankai Huo, et al. Vision foundation models in remote sensing: A survey. IEEE Geoscience and Remote Sensing Magazine, 2025. +[46] Xiaoqiang Lu, Binqiang Wang, Xiangtao Zheng, and Xuelong Li. Exploring models and data for remote sensing image caption generation. IEEE Transactions on Geoscience and Remote Sensing, 56(4):2183-2195, 2017. 
+[47] Edgar Margffoy-Tuay, Juan C Pérez, Emilio Botero, and Pablo Arbeláez. Dynamic multimodal instance segmentation guided by natural language queries. In Proceedings of the European Conference on Computer Vision (ECCV), pages 630–645, 2018. +[48] Fausto Miletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 fourth international conference on 3D vision (3DV), pages 565-571. IEEE, 2016. +[49] Sayan Nag, Koustava Goswami, and Srikrishna Karanam. Safari: Adaptive sequence tr a ns f ormer for we a kly supervised r eferring expression segmentat i on. In European Conference on Computer Vision, pages 485-503. Springer, 2024. +[50] Ruizhe Ou, Yuan Hu, Fan Zhang, Jiaxin Chen, and Yu Liu. Geopix: Multi-modal large language model for pixel-level image understanding in remote sensing. arXiv preprint arXiv:2501.06828, 2025. +[51] Chao Pang, Xingxing Weng, Jiang Wu, Jiayu Li, Yi Liu, Jiaxing Sun, Weijia Li, Shuai Wang, Litong Feng, Gui-Song Xia, et al. Vhm: Versatile and honest vision language model for remote sensing image analysis. arXiv preprint arXiv:2403.20213, 2024. +[52] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021. +[53] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13009-13018, 2024. +[54] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024. +[55] Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26374-26383, 2024. +[56] Esther Rolf, Konstantin Klemmer, Caleb Robinson, and Hannah Kerner. Mission critical-satellite data is a distinct modality in machine learning. arXiv preprint arXiv:2402.01444, 2024. +[57] Akashah Shabbir, Mohammed Zumri, Mohammed Bennamoun, Fahad S Khan, and Salman Khan. Geopixel: Pixel grounding large multimodal model in remote sensing. arXiv preprint arXiv:2501.13925, 2025. + +[58] Chao Shang, Zichen Song, Heqian Qiu, Lanxiao Wang, Fanman Meng, and Hongliang Li. Prompt-driven referring image segmentation with instance contrasting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4124-4134, 2024. +[59] Claude E Shannon. A mathematical theory of communication. The Bell system technical journal, 27(3):379-423, 1948. +[60] Hengcan Shi, Hongliang Li, Fanman Meng, and Qingbo Wu. Key-word-aware network for referring expression image segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 38-54, 2018. +[61] Andreas Steiner, André Susano Pinto, Michael Tschannen, Daniel Keysers, Xiao Wang, Yonatan Bitton, Alexey Gritsenko, Matthias Minderer, Anthony Sherbondy, Shangbang Long, et al. Paligemma 2: A family of versatile vlms for transfer. 
arXiv preprint arXiv:2412.03555, 2024. +[62] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023. +[63] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017. +[64] Fengxiang Wang, Hongzhen Wang, Mingshuo Chen, Di Wang, Yulin Wang, Zonghao Guo, Qiang Ma, Long Lan, Wenjing Yang, Jing Zhang, et al. Xlrs-bench: Could your multimodal llms understand extremely large ultra-high-resolution remote sensing imagery? arXiv preprint arXiv:2503.23771, 2025. +[65] Junchi Wang and Lei Ke. Llm-seg: Bridging image segmentation and large language model reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1765-1774, 2024. +[66] Junjue Wang, Zhuo Zheng, Zihang Chen, Ailong Ma, and Yanfei Zhong. Earthvqa: Towards queryable earth via relational reasoning-based remote sensing visual question answering. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 5481-5489, 2024. +[67] Junjue Wang, Zhuo Zheng, Ailong Ma, Xiaoyan Lu, and Yanfei Zhong. Loveda: A remote sensing land-cover dataset for domain adaptive semantic segmentation. arXiv preprint arXiv:2110.08733, 2021. +[68] Wenhai Wang, Zhe Chen, Xiaokang Chen, Jiannan Wu, Xizhou Zhu, Gang Zeng, Ping Luo, Tong Lu, Jie Zhou, Yu Qiao, et al. Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. Advances in Neural Information Processing Systems, 36:61501-61513, 2023. +[69] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11686-11695, 2022. +[70] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004. +[71] Cong Wei, Yujie Zhong, Haoxian Tan, Yingsen Zeng, Yong Liu, Zheng Zhao, and Yujiu Yang. Instructseg: Unifying instructed visual segmentation with multi-modal large language models. arXiv preprint arXiv:2412.14006, 2024. +[72] Jiannan Wu, Yi Jiang, Peize Sun, Zehuan Yuan, and Ping Luo. Language as queries for referring video object segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4974-4984, 2022. +[73] Jiannan Wu, Muyan Zhong, Sen Xing, Zeqiang Lai, Zhaoyang Liu, Zhe Chen, Wenhai Wang, Xizhou Zhu, Lewei Lu, Tong Lu, et al. Visionlm v2: An end-to-end generalist multimodal large language model for hundreds of vision-language tasks. Advances in Neural Information Processing Systems, 37:69925-69975, 2025. + +[74] Jianzong Wu, Xiangtai Li, Xia Li, Henghui Ding, Yunhai Tong, and Dacheng Tao. Towards robust referring image segmentation. IEEE Transactions on Image Processing, 2024. +[75] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3858-3869, 2024. +[76] Bin Xiao, Haiping Wu, Weijian Xu, Xiyang Dai, Houdong Hu, Yumao Lu, Michael Zeng, Ce Liu, and Lu Yuan. Florence-2: Advancing a unified representation for a variety of vision tasks (2023). 
URL https://arxiv.org/abs/2311.06242, 2023. +[77] Zunnan Xu, Zhihong Chen, Yong Zhang, Yibing Song, Xiang Wan, and Guanbin Li. Bridging vision and language encoders: Parameter-efficient tuning for referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17503-17512, 2023. +[78] Cilin Yan, Haochen Wang, Shilin Yan, Xiaolong Jiang, Yao Hu, Guoliang Kang, Weidi Xie, and Efstratios Gavves. Visa: Reasoning video object segmentation via large language models. In European Conference on Computer Vision, pages 98-115. Springer, 2024. +[79] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024. +[80] Senqiao Yang, Tianyuan Qu, Xin Lai, Zhuotao Tian, Bohao Peng, Shu Liu, and Jiaya Jia. Lisa++: An improved baseline for reasoning segmentation with large language model. arXiv preprint arXiv:2312.17240, 2023. +[81] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18155–18165, 2022. +[82] Linli Yao, Lei Li, Shuhuai Ren, Lean Wang, Yuanxin Liu, Xu Sun, and Lu Hou. Deco: Decoupling token compression from semantic abstraction in multimodal large language models. arXiv preprint arXiv:2405.20985, 2024. +[83] Linwei Ye, Mrigank Rochan, Zhi Liu, and Yang Wang. Cross-modal self-attention network for referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10502–10511, 2019. +[84] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv preprint arXiv:2501.04001, 2025. +[85] Zhenghang Yuan, Lichao Mou, Yuansheng Hua, and Xiao Xiang Zhu. Rrsis: Referring remote sensing image segmentation. IEEE Transactions on Geoscience and Remote Sensing, 2024. +[86] Yang Zhan, Zhitong Xiong, and Yuan Yuan. Rsvg: Exploring data and models for visual grounding on remote sensing data. IEEE Transactions on Geoscience and Remote Sensing, 61:1-13, 2023. +[87] Ao Zhang, Yuan Yao, Wei Ji, Zhiyuan Liu, and Tat-Seng Chua. Next-chat: An Imm for chat, detection and segmentation. arXiv preprint arXiv:2311.04498, 2023. +[88] Ao Zhang, Yuan Yao, Wei Ji, Zhiyuan Liu, and Tat-Seng Chua. Next-chat: An Imm for chat, detection and segmentation. In International Conference on Machine Learning, pages 60116-60133. PMLR, 2024. +[89] Tao Zhang, Xiangtai Li, Hao Fei, Haobo Yuan, Shengqiong Wu, Shunping Ji, Chen Change Loy, and Shuicheng Yan. Omg-llava: Bridging image-level, object-level, pixel-level reasoning and understanding. Advances in Neural Information Processing Systems, 37:71737-71767, 2025. +[90] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 14227-14238, 2024. + +[91] Yuxuan Zhang, Tianheng Cheng, Rui Hu, Lei Liu, Heng Liu, Longjin Ran, Xiaoxin Chen, Wenyu Liu, and Xinggang Wang. Evf-sam: Early vision-language fusion for text-prompted segment anything model. arXiv preprint arXiv:2406.20076, 2024. 
+[92] Zheng Zhang, Yeyao Ma, Enming Zhang, and Xiang Bai. Psalm: Pixelwise segmentation with large multi-modal model. In European Conference on Computer Vision, pages 74-91. Springer, 2024. +[93] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017. +[94] Li Zhou, Xu Yuan, Zenghui Sun, Zikun Zhou, and Jingsong Lan. Instruction-guided multi-granularity segmentation and captioning with large multimodal model. arXiv preprint arXiv:2409.13407, 2024. +[95] Yue Zhou, Mengcheng Lan, Xiang Li, Yiping Ke, Xue Jiang, Litong Feng, and Wayne Zhang. Geoground: A unified large vision-language model. for remote sensing visual grounding. arXiv preprint arXiv:2411.11904, 2024. + +# A Data + +# A.1 Annotation of EarthReason + +Each sample of the EarthReason benchmark consists of an image, a corresponding mask, and six reasoning queries along with their respective answers. Given that our metadata is derived from classification datasets, we employed GPT-4o and GPT-3.5 to generate textual annotations, and invited multiple remote sensing and vision experts to provide accurate and reliable mask annotations. Overall, our annotation process consists of the following three steps: + +- Step-1: To fully leverage the powerful multimodal capabilities and extensive geographic knowledge of GPT-4o, we carefully design the prompt, which is then provided alongside images and their corresponding category labels to generate a reasoning question-answer pair. The prompt is illustrated in Figure 6. +- Step-2: To avoid homogeneous question-answer formats under a single prompt, we further employ the textual capabilities of GPT-3.5 to expand each generated question into six variations and each answer into three alternatives. The prompt used for this expansion is shown in Figure 7. +- Step-3: Unlike previous methods that rely on semi-automatic mask annotation based on off-the-shelf bounding boxes or masks, we invite multiple remote sensing vision experts to perform accurate and efficient mask annotation guided by the generated questions. To further improve annotation efficiency, we incorporate SAM-H as an auxiliary tool for some simple targets. Subsequently, we perform cross-validation of the annotation results and re-associate the samples that do not meet the quality standards. As shown in Figure 8, (a), (b), and (c), derived from the RRSIS-D dataset, illustrate the masks of semi-automatic annotation based on bounding boxes. (a) and (c) exhibit noticeable annotation errors, while in (b), the query does not align with the annotation. (d), (e), and (f) illustrate our high-quality manual annotations. + +Prompt: You are an expert in geographic remote sensing imagery. Please fully analyze the geographical landscape and cultural features in remote sensing images. Generate an implicit reasoning questions based on given object categories. Please use your imagination and feel free to change the sentence structure or add a situation description. Just give the implicit reasoning questions that meet the requirements. The descriptions must refer to the natural landscapes and cultural landscapes shown in remote sensing images. The output implicit reasoning questions need to meet the following requirements: + +(1) Please imagine the scene and output an implicit reasoning question to describe the attributes or functions of the given object. 
The output question must have a certain degree of reasoning difficulty and be helpful to humans. + +(2) Do not explicitly write the name or description of the target object in the original text. Questions should be asked in the form of asking where, which infrastructure, how to do a certain activity, which location, what object. + +(3) The output cannot be redundant. Just give one question and its answer that you think has the highest confidence. The question should be at least 18 words. The answer requires giving the name of the given object and then explaining why the answer is this, about 20 words. + +(4) Do not output any redundant information except the question and the answer, and separate them with line break. + +The given object category is . + +![](images/8a7c1ee7e7df958a3036a633142562202525c72efe5cb6c495616c48f73e2c0d.jpg) +Figure 6: The illustration of the prompt construction process for generating question-answer pairs for geospatial pixel reasoning. + +![](images/cee5a1dcf359a7e6af58bb4d2044cd327dc5dbd4b06e241ee9bdfc95b7d4e653.jpg) + +Question: If an earthquake were to occur, what is the most secure area to assemble numerous individuals that also facilitates emergency service access? + +Answer: The ground track field stands out as the most secure location, thanks to its vast open area that minimizes the chance of hazards, as well as its close accessibility to primary roads for swift emergency response. + +# A.2 EarthReason Statistics + +The EarthReason benchmark comprises 28 categories, and the number of samples in each category is shown in Figure 9 (a). It can be observed that the distribution of the 28 categories is relatively balanced. Figure 9 (b), (c), and (d) illustrate the category distributions in the training, validation, and test sets, respectively. To evaluate the model's generalization capability, we specifically excluded four categories—"basketball court", "island", "lake", and "stadium"—from the training set. Moreover, we introduced 119 empty target samples to mitigate potential hallucinations of the model. + +Prompt: Imagine you are a remote sensing expert with extensive knowledge in geography. I will give you a question or description related to remote sensing, and you will expand it into 6 related questions or topics. The given question is "If an earthquake were to occur, what is the most secure area to assemble numerous individuals that also facilitates emergency service access?" + +![](images/8e0c8b664ffb3fc42eff7302e7b220fe9d53dbda5e1ae4a06ffd7deb61a68e29.jpg) + +Response: 1. In case of an earthquake, where can individuals find a clear and accessible area nearby to assemble safely? 2. During an earthquake, what spacious and unobstructed area close by would provide the safest environment for all of us to meet? 3. Should an earthquake happen, where in the vicinity can people find an open area to come together safely and securely? 4. In the scenario of an earthquake, where can residents find a secure area with open space and accessible paths for evacuation? 5. In an earthquake situation, which nearby area featuring open space and clear pathways would be the safest choice for resident evacuation? 6. If an earthquake occurs, which types of infrastructure are anticipated to function as dependable gathering spots with adequate open areas? + +Prompt: Imagine you are a remote sensing expert with extensive knowledge in geography. I will give you a question or description related to remote sensing, and you will expand it into 3 related questions or topics. 
The given description is "The ground track field stands out as the most secure location, thanks to its vast open area that minimizes the chance of hazards, as well as its close accessibility to primary roads for swift emergency response." + +![](images/cd5f50160afb79ae1a63f02722faf7aa11d19addd646282b4ecb2b9b461fe764.jpg) +Figure 7: The illustration of the prompt construction process for expand question-answer pairs. + +Response: 1. With minimal obstructions and easy access to main roads, the ground track field offers both safety and convenience, positioning it as the most reliable site in terms of hazard prevention and emergency preparedness. 2. The expansive open space of the track field provides a secure environment, greatly minimizing the chance of falling objects. This makes it an ideal refuge from any potential dangers. 3. With its generous outdoor space devoid of any encroaching structures, the ground track field emerges as the premier choice, ensuring an environment where safety is prioritized. + +![](images/63be9c1fedf7ab7e4bf057f8eb6308a8f4f8322d71c7572a0c9a3f3ab0b40550.jpg) +(a) query: The gray bridge + +![](images/6a2f7de8ce8298676a498830ee0e324c128682c6a58db686b1b18de31119fa5d.jpg) +Semi-automatic Annotation +(b) query: The blue storage tank + +![](images/3b338ec1416efc1d1a00774bc71968ec2a296ede5a3c9b43f9c12d3894da5ecf.jpg) +(c) query: The gray bridge in the middle + +![](images/64f1d1067264538ec56ef833f567f8155031c3511a2b8380a674387b0ab0e6c6.jpg) +(d) +Figure 8: Comparison of annotation quality. (a), (b) and (c) are from RRSIS-D dataset, (d), (e) and (f) are from our EarthReason dataset. + +![](images/b7830596c8beefda3b7adb6e3ec690a0d8f9c23de9b699a5c3e8d7cde4218317.jpg) +Manual Annotation +(e) + +![](images/ad4b65aba8ac73b4d3e2123a3360c28d305d5b56bd775957594a0e5f110a7009.jpg) +(f) + +# B Additional Implementation Details + +# B.1 Details of Training Hyper-parameters + +Table 8 presents the hyper-parameter settings used during the training of our model. For training on the referring segmentation datasets, we employ only focal loss and dice loss to supervise mask generation. In contrast, for training on geospatial pixel reasoning task, we additionally incorporate the cross-entropy loss from the large language model to supervise text answer generation. + +![](images/fbb81dfa143605a9dc5c10d0c2a74b256e88456bedcbfacde6bd86ece4eb0954.jpg) +(a) category distribution of the EarthReason + +![](images/b203e7056773e3ec7221c8df7e9a06d42ae7f48e578d986bd4f333283d683f76.jpg) +(b) category distribution of the training set + +![](images/1e615f98dceda7b772a4d58f612ffbf8e5cc2ca8fb33833dc8c8e1b0bbe9e149.jpg) +(c) category distribution of the validation set +Figure 9: The category distribution of EarthReason. + +![](images/8bded80fa7dc64d80aaa1c8c3131eb065cdb9cb36a69f4eed6db7c050121896d.jpg) +(d) category distribution of the test set + +Table 8: The hyper-parameters for model training. + +
<table>
<tr><td>Parameters</td><td>Value</td></tr>
<tr><td>Optimizer</td><td>AdamW</td></tr>
<tr><td>Learning Rate</td><td>1 × 10<sup>-4</sup></td></tr>
<tr><td>Batch Size</td><td>16</td></tr>
<tr><td>Number of Iteration</td><td>7,610 / 5,400 / 2,220</td></tr>
<tr><td>Learning Rate Schedule</td><td>Cosine Decay</td></tr>
<tr><td>Weight Decay</td><td>0.0</td></tr>
<tr><td>Warmup Ratio</td><td>0.03</td></tr>
<tr><td>β1</td><td>0.9</td></tr>
<tr><td>β2</td><td>0.999</td></tr>
<tr><td>Image Size</td><td>1024 × 1024</td></tr>
<tr><td>Image Processing</td><td>Resize long edge to 1024 and padding short edge to 1024.</td></tr>
</table>
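To make the recipe above easier to connect with the loss description in B.1, the snippet below sketches one way the optimizer, warmup-plus-cosine schedule, and combined mask/text losses could be assembled. It is a minimal sketch assuming PyTorch; `model`, the batch keys, and the output names `mask_logits` / `text_logits` are illustrative placeholders rather than the released SegEarth-R1 code.

```python
# Minimal sketch (PyTorch assumed) of the Table 8 recipe and the B.1 losses.
# All names below are illustrative; this is not the official implementation.
import math
import torch
import torch.nn.functional as F


def dice_loss(mask_logits, target, eps=1.0):
    """Soft Dice loss between predicted mask logits and a binary target mask."""
    pred = mask_logits.sigmoid().flatten(1)
    target = target.flatten(1)
    inter = (pred * target).sum(-1)
    union = pred.sum(-1) + target.sum(-1)
    return (1.0 - (2.0 * inter + eps) / (union + eps)).mean()


def focal_loss(mask_logits, target, alpha=0.25, gamma=2.0):
    """Binary focal loss on mask logits."""
    bce = F.binary_cross_entropy_with_logits(mask_logits, target, reduction="none")
    p_t = torch.exp(-bce)                                   # prob. of the true class
    alpha_t = alpha * target + (1.0 - alpha) * (1.0 - target)
    return (alpha_t * (1.0 - p_t) ** gamma * bce).mean()


def training_loss(outputs, batch, with_text_loss):
    """Focal + Dice for referring segmentation; add the LLM cross-entropy
    over answer tokens when training on geospatial pixel reasoning."""
    loss = focal_loss(outputs["mask_logits"], batch["mask"]) \
         + dice_loss(outputs["mask_logits"], batch["mask"])
    if with_text_loss:
        loss = loss + F.cross_entropy(
            outputs["text_logits"].flatten(0, 1),           # (B*T, vocab)
            batch["answer_ids"].flatten(),                   # (B*T,)
        )
    return loss


def build_optimizer(model, total_iters, warmup_ratio=0.03):
    """AdamW (lr 1e-4, betas 0.9/0.999, no weight decay) with 3% linear
    warmup followed by cosine decay, matching Table 8."""
    opt = torch.optim.AdamW(model.parameters(), lr=1e-4,
                            betas=(0.9, 0.999), weight_decay=0.0)
    warmup_iters = max(1, int(total_iters * warmup_ratio))

    def lr_lambda(step):
        if step < warmup_iters:
            return step / warmup_iters
        progress = (step - warmup_iters) / max(1, total_iters - warmup_iters)
        return 0.5 * (1.0 + math.cos(math.pi * progress))

    sched = torch.optim.lr_scheduler.LambdaLR(opt, lr_lambda)
    return opt, sched
```

For example, `build_optimizer(model, total_iters=7610)` would correspond to the first iteration budget in Table 8; the three listed values presumably correspond to the different training mixtures used in the experiments.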
+ +# C Examples + +# C.1 More Qualitative Results on EarthReason + +Figure 10 presents a comparison between SegEarth-R1 and other models on the EarthReason dataset. It can be observed that our model demonstrates a better understanding of long reasoning instructions and produces more accurate mask generation. + +# C.2 More Qualitative Results on RRSIS-D + +Figure 11 presents a comparison between SegEarth-R1 and PSALM on the RRSIS-D dataset. Our model demonstrates a better understanding of direct geographical attributes such as location, color, and size compared to PSALM. This improvement is attributed to the removal of indirect mask + +prediction using mask tokens, allowing semantic information (description embeddings) to directly interact with image features to generate masks. + +![](images/3a63b8b221e5c564bb5f8ad96a28a06fe20f6e53597410510034ebcbad7c45e6.jpg) +Figure 10: Comparison with other models on EarthReason. + +![](images/c42b96580a354e24b7c339f8f548a71f74e5ed98862c7c6e1c9f21bcca65b3bc.jpg) +Figure 11: Comparison with PSALM on RRSIS-D. \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09644/images/07ead8f5007da4af1336a18007e5e0291b9497abe37e8cec54c29f8a7d7560f2.jpg b/data/2025/2504_09xxx/2504.09644/images/07ead8f5007da4af1336a18007e5e0291b9497abe37e8cec54c29f8a7d7560f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..83a40a5f6e37b5415df0b7cc005c6aab5e81b9da --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/07ead8f5007da4af1336a18007e5e0291b9497abe37e8cec54c29f8a7d7560f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b34e54981bbec9bd0cf1f9ebe9f34b7e16560220907fb157069c9d48dcdeda44 +size 48914 diff --git a/data/2025/2504_09xxx/2504.09644/images/0f88521154e49797916ce7004643aedd03775ca6d240ab8fe3dc4e9d7ad29321.jpg b/data/2025/2504_09xxx/2504.09644/images/0f88521154e49797916ce7004643aedd03775ca6d240ab8fe3dc4e9d7ad29321.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3f0c00e14f8be1f06b08a2acd88fa8b333166f5a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/0f88521154e49797916ce7004643aedd03775ca6d240ab8fe3dc4e9d7ad29321.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6cd74bc10622ab08e3d758bb67e416cc4d2209c94943ccd24ea148a283f317d4 +size 67469 diff --git a/data/2025/2504_09xxx/2504.09644/images/111d5a3571b67592dfe806350929861d98d9bf966abdfc8ad4b032f32e50a48f.jpg b/data/2025/2504_09xxx/2504.09644/images/111d5a3571b67592dfe806350929861d98d9bf966abdfc8ad4b032f32e50a48f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0e6e679ffc50c69f10eedb38eeaca00f4f1dbb0b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/111d5a3571b67592dfe806350929861d98d9bf966abdfc8ad4b032f32e50a48f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5393c2692a4fb9a31ff61f9dd1b1cafc660700a6e6656cfa648813a09cd7f9d8 +size 4195 diff --git a/data/2025/2504_09xxx/2504.09644/images/1e615f98dceda7b772a4d58f612ffbf8e5cc2ca8fb33833dc8c8e1b0bbe9e149.jpg b/data/2025/2504_09xxx/2504.09644/images/1e615f98dceda7b772a4d58f612ffbf8e5cc2ca8fb33833dc8c8e1b0bbe9e149.jpg new file mode 100644 index 0000000000000000000000000000000000000000..08888e52a3b8c4cf2b768227d05f257fb05fbdb6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/1e615f98dceda7b772a4d58f612ffbf8e5cc2ca8fb33833dc8c8e1b0bbe9e149.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:937b1f73d16c749144b75bf561e8eb398a19747ef97008bda7e46191475a3b11 +size 
21145 diff --git a/data/2025/2504_09xxx/2504.09644/images/3776661b246a2704a8f32afe1833b4a0a2c68b8ab637443163a5695fc0cdb152.jpg b/data/2025/2504_09xxx/2504.09644/images/3776661b246a2704a8f32afe1833b4a0a2c68b8ab637443163a5695fc0cdb152.jpg new file mode 100644 index 0000000000000000000000000000000000000000..079574a2119804e3d2333c63f94b5aaffb5f67c1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/3776661b246a2704a8f32afe1833b4a0a2c68b8ab637443163a5695fc0cdb152.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:75123b79848bff0175c1a44e45a7d2809ba0013419adb173caa3bfb6a4784ab5 +size 5930 diff --git a/data/2025/2504_09xxx/2504.09644/images/3a63b8b221e5c564bb5f8ad96a28a06fe20f6e53597410510034ebcbad7c45e6.jpg b/data/2025/2504_09xxx/2504.09644/images/3a63b8b221e5c564bb5f8ad96a28a06fe20f6e53597410510034ebcbad7c45e6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ccbedcc39ea83d64b6ff80b0f214678974e418e0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/3a63b8b221e5c564bb5f8ad96a28a06fe20f6e53597410510034ebcbad7c45e6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a76e8c519940ad4fe9b687ac91f93a4b7f3827640a1bf9acc655c98b1bf155b1 +size 302865 diff --git a/data/2025/2504_09xxx/2504.09644/images/3b338ec1416efc1d1a00774bc71968ec2a296ede5a3c9b43f9c12d3894da5ecf.jpg b/data/2025/2504_09xxx/2504.09644/images/3b338ec1416efc1d1a00774bc71968ec2a296ede5a3c9b43f9c12d3894da5ecf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e4daf1b867ba7f269802ee0d2644273cdf0d5bd --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/3b338ec1416efc1d1a00774bc71968ec2a296ede5a3c9b43f9c12d3894da5ecf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8d63e9d3061b880708a6cab1cac2cffb5f9c0bd8ffbcac0012884239a5f76260 +size 16733 diff --git a/data/2025/2504_09xxx/2504.09644/images/546e8773c1096cb376d642a5780c930b2168aa5bb02a7d60ca5a69f2fe46b2bf.jpg b/data/2025/2504_09xxx/2504.09644/images/546e8773c1096cb376d642a5780c930b2168aa5bb02a7d60ca5a69f2fe46b2bf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fe0247bb86f24fe56f605762401d07a5e9bcf334 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/546e8773c1096cb376d642a5780c930b2168aa5bb02a7d60ca5a69f2fe46b2bf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fe11215a989e2bd4956e52b6f749d67f238ea83a02024ca1269fee0ce9ed0ae3 +size 16929 diff --git a/data/2025/2504_09xxx/2504.09644/images/626e52b1d30a91189d3148a9dff60dcfc5c62d67257be0fb854d0081f77df054.jpg b/data/2025/2504_09xxx/2504.09644/images/626e52b1d30a91189d3148a9dff60dcfc5c62d67257be0fb854d0081f77df054.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7baacab7f8e39e3bd8d4e6bcfd457d8631324e8c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/626e52b1d30a91189d3148a9dff60dcfc5c62d67257be0fb854d0081f77df054.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:316cda49ac8742a681fd1cb45ca209c54bf3e8b69e3dc4729231b263c4c3fcf7 +size 29554 diff --git a/data/2025/2504_09xxx/2504.09644/images/63be9c1fedf7ab7e4bf057f8eb6308a8f4f8322d71c7572a0c9a3f3ab0b40550.jpg b/data/2025/2504_09xxx/2504.09644/images/63be9c1fedf7ab7e4bf057f8eb6308a8f4f8322d71c7572a0c9a3f3ab0b40550.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c93eaf5d0002b800fd6986ba8d24860437c1cb7c --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09644/images/63be9c1fedf7ab7e4bf057f8eb6308a8f4f8322d71c7572a0c9a3f3ab0b40550.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29198f2e51c3939d157c0875fe23326ff26cbd14335c10fabf082c42933c2447 +size 16463 diff --git a/data/2025/2504_09xxx/2504.09644/images/64c75a815a92b0c337ccdef9e0c770c2624c1b957678d0c0f48409b52639869c.jpg b/data/2025/2504_09xxx/2504.09644/images/64c75a815a92b0c337ccdef9e0c770c2624c1b957678d0c0f48409b52639869c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b174dd9df903c2bb6553a2b661e92348db480796 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/64c75a815a92b0c337ccdef9e0c770c2624c1b957678d0c0f48409b52639869c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:033a677dfd0de7bf5f0b29ed6e6fa4aba4821efd8982c502472e7ef918fc9593 +size 8049 diff --git a/data/2025/2504_09xxx/2504.09644/images/64f1d1067264538ec56ef833f567f8155031c3511a2b8380a674387b0ab0e6c6.jpg b/data/2025/2504_09xxx/2504.09644/images/64f1d1067264538ec56ef833f567f8155031c3511a2b8380a674387b0ab0e6c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8886ee83965a958df42d31367f06cae7b4373bf0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/64f1d1067264538ec56ef833f567f8155031c3511a2b8380a674387b0ab0e6c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0ec24d3337658c0749d89ebd49c1f8c2e37d5f09b836222b1c78190caeb9934d +size 14098 diff --git a/data/2025/2504_09xxx/2504.09644/images/6a2f7de8ce8298676a498830ee0e324c128682c6a58db686b1b18de31119fa5d.jpg b/data/2025/2504_09xxx/2504.09644/images/6a2f7de8ce8298676a498830ee0e324c128682c6a58db686b1b18de31119fa5d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eabd5df8af43be47571f695737fc75da486823d7 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/6a2f7de8ce8298676a498830ee0e324c128682c6a58db686b1b18de31119fa5d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a99153d5cf43aba0f9e26a01cfc3fc2b379be13f8fbbe46acfe87f72e0e040ff +size 13085 diff --git a/data/2025/2504_09xxx/2504.09644/images/6b0e75a6bcd18a0617f3eaf4d14243d21e995e52771d154683c5f24aade7fcc6.jpg b/data/2025/2504_09xxx/2504.09644/images/6b0e75a6bcd18a0617f3eaf4d14243d21e995e52771d154683c5f24aade7fcc6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7f3673669fc38f55abc2664b8280949cae8fc7b1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/6b0e75a6bcd18a0617f3eaf4d14243d21e995e52771d154683c5f24aade7fcc6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:420efdefe28500a9459dea8916436ba866f0374ca80e9737a179a77ae89d1fd6 +size 11665 diff --git a/data/2025/2504_09xxx/2504.09644/images/7ff79a3cf7322060499f5dd9bfc86b7f0a71adf84720a93f08a6b713b74830aa.jpg b/data/2025/2504_09xxx/2504.09644/images/7ff79a3cf7322060499f5dd9bfc86b7f0a71adf84720a93f08a6b713b74830aa.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8585daa210c809ffa49297dba0c8483cbf186ace --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/7ff79a3cf7322060499f5dd9bfc86b7f0a71adf84720a93f08a6b713b74830aa.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d160aad26edbb8c4ea9b276ea849cd02d5e37311bec5e53da27fd165222926c2 +size 9615 diff --git a/data/2025/2504_09xxx/2504.09644/images/820d4bbd8c8037bc39591915e14ea1faf08f55353abad5a2259126f713034a38.jpg 
b/data/2025/2504_09xxx/2504.09644/images/820d4bbd8c8037bc39591915e14ea1faf08f55353abad5a2259126f713034a38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c163f5b8c07c8d1608cc614d390deff68e013819 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/820d4bbd8c8037bc39591915e14ea1faf08f55353abad5a2259126f713034a38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ce143206c971da62ef5de4630d417620099a9d44b3d1a0112e83f638e4ee407 +size 6618 diff --git a/data/2025/2504_09xxx/2504.09644/images/83befbd5344d600c990778748b796f239ff6028bc3f8f1dd0071afa497f815a3.jpg b/data/2025/2504_09xxx/2504.09644/images/83befbd5344d600c990778748b796f239ff6028bc3f8f1dd0071afa497f815a3.jpg new file mode 100644 index 0000000000000000000000000000000000000000..20bc099a80e4ed90ac05e660ef52cd97958a913b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/83befbd5344d600c990778748b796f239ff6028bc3f8f1dd0071afa497f815a3.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:36fb00466a22b6fcfd0f59d4a042c7d8d02fd019c755b6c13295321b2335a356 +size 6826 diff --git a/data/2025/2504_09xxx/2504.09644/images/8a7c1ee7e7df958a3036a633142562202525c72efe5cb6c495616c48f73e2c0d.jpg b/data/2025/2504_09xxx/2504.09644/images/8a7c1ee7e7df958a3036a633142562202525c72efe5cb6c495616c48f73e2c0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b0e85e97f0d793b77caf2a04f563417296e6a400 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/8a7c1ee7e7df958a3036a633142562202525c72efe5cb6c495616c48f73e2c0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0f5b344eb354dbc937ee8f613bd9298dcdac9e71c6a27f0d39b95facc21507d3 +size 1367 diff --git a/data/2025/2504_09xxx/2504.09644/images/8bded80fa7dc64d80aaa1c8c3131eb065cdb9cb36a69f4eed6db7c050121896d.jpg b/data/2025/2504_09xxx/2504.09644/images/8bded80fa7dc64d80aaa1c8c3131eb065cdb9cb36a69f4eed6db7c050121896d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..426ca2f801e80ec85ecc25265728a00077570e2b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/8bded80fa7dc64d80aaa1c8c3131eb065cdb9cb36a69f4eed6db7c050121896d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d2f19a4314db8209d314cd2fcb7f107ac899114954cbf8e4f79b0431cdabf763 +size 20904 diff --git a/data/2025/2504_09xxx/2504.09644/images/8e0c8b664ffb3fc42eff7302e7b220fe9d53dbda5e1ae4a06ffd7deb61a68e29.jpg b/data/2025/2504_09xxx/2504.09644/images/8e0c8b664ffb3fc42eff7302e7b220fe9d53dbda5e1ae4a06ffd7deb61a68e29.jpg new file mode 100644 index 0000000000000000000000000000000000000000..625269f6b8fb284ab89ac4fe18b6dfd62a30a70f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/8e0c8b664ffb3fc42eff7302e7b220fe9d53dbda5e1ae4a06ffd7deb61a68e29.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3b38526f901a701fabbf155bd345ca2c4418fbda556a889b71387741a8718793 +size 1219 diff --git a/data/2025/2504_09xxx/2504.09644/images/97c77ed6388f2c870fddee56db8381e6bba913581803ca912ab29deb5ea07b5b.jpg b/data/2025/2504_09xxx/2504.09644/images/97c77ed6388f2c870fddee56db8381e6bba913581803ca912ab29deb5ea07b5b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09c550824197f9665af34d87a39ec97df8413214 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/97c77ed6388f2c870fddee56db8381e6bba913581803ca912ab29deb5ea07b5b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:54483b556f966ee3b923bb67aacb7ea40db1837f129862cc0cc33bd852756cfd +size 10276 diff --git a/data/2025/2504_09xxx/2504.09644/images/a22e302b05e8e7f61e8118a42801d303ff0685a00d8e30275a530a2156b8560b.jpg b/data/2025/2504_09xxx/2504.09644/images/a22e302b05e8e7f61e8118a42801d303ff0685a00d8e30275a530a2156b8560b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..24e886683dcecf24be15a020cbf67b91875b8c0a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/a22e302b05e8e7f61e8118a42801d303ff0685a00d8e30275a530a2156b8560b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:38a6a6f1658b1170a91adb2162114b9844fce3d3593b27c4f13ab39482c2bd48 +size 14015 diff --git a/data/2025/2504_09xxx/2504.09644/images/ad4b65aba8ac73b4d3e2123a3360c28d305d5b56bd775957594a0e5f110a7009.jpg b/data/2025/2504_09xxx/2504.09644/images/ad4b65aba8ac73b4d3e2123a3360c28d305d5b56bd775957594a0e5f110a7009.jpg new file mode 100644 index 0000000000000000000000000000000000000000..741db38e679c323d81ce826fe81742a777123abe --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/ad4b65aba8ac73b4d3e2123a3360c28d305d5b56bd775957594a0e5f110a7009.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e39904c7824d231e53ee04a2344739f364d0078f6108c8e9fae660e959c5f3f4 +size 12047 diff --git a/data/2025/2504_09xxx/2504.09644/images/b203e7056773e3ec7221c8df7e9a06d42ae7f48e578d986bd4f333283d683f76.jpg b/data/2025/2504_09xxx/2504.09644/images/b203e7056773e3ec7221c8df7e9a06d42ae7f48e578d986bd4f333283d683f76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c874cfe835a7e569f637cb089bcda92497396ed6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/b203e7056773e3ec7221c8df7e9a06d42ae7f48e578d986bd4f333283d683f76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:617c0cadb0a990d16d845f6e5825f6f666b3aa773fc1b91ae945b0b59d1b16b9 +size 23816 diff --git a/data/2025/2504_09xxx/2504.09644/images/b7830596c8beefda3b7adb6e3ec690a0d8f9c23de9b699a5c3e8d7cde4218317.jpg b/data/2025/2504_09xxx/2504.09644/images/b7830596c8beefda3b7adb6e3ec690a0d8f9c23de9b699a5c3e8d7cde4218317.jpg new file mode 100644 index 0000000000000000000000000000000000000000..91515bdfdb4212994b9cecdb6c454878a9f349de --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/b7830596c8beefda3b7adb6e3ec690a0d8f9c23de9b699a5c3e8d7cde4218317.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4622851b5552b4eef6c816002e11b670f36588e5adb6b47942efecca91c7b4d5 +size 21264 diff --git a/data/2025/2504_09xxx/2504.09644/images/bb2e1961765507e81bf0f6b605b6e42bb453f5cff362f055ab7b4b35d26381c2.jpg b/data/2025/2504_09xxx/2504.09644/images/bb2e1961765507e81bf0f6b605b6e42bb453f5cff362f055ab7b4b35d26381c2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..538bd7f30807b2807a81cf4a6409f50a0b535417 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/bb2e1961765507e81bf0f6b605b6e42bb453f5cff362f055ab7b4b35d26381c2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:664a166a88f42e8d8448daada51f2fe007cf6df2ff6bbeb275a11797dbea028c +size 6801 diff --git a/data/2025/2504_09xxx/2504.09644/images/c1fae4041eba706b24a7df9c0e3dce21c67da5799853e8d45371e6e5a8da4d35.jpg b/data/2025/2504_09xxx/2504.09644/images/c1fae4041eba706b24a7df9c0e3dce21c67da5799853e8d45371e6e5a8da4d35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0a71d662abe715e0622358d437b4f6339b9ab681 --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09644/images/c1fae4041eba706b24a7df9c0e3dce21c67da5799853e8d45371e6e5a8da4d35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4bdf3f1b082add941baf5e7eff3ae01a25495b22446d5f78a6390a71a96ef844 +size 119651 diff --git a/data/2025/2504_09xxx/2504.09644/images/c42b96580a354e24b7c339f8f548a71f74e5ed98862c7c6e1c9f21bcca65b3bc.jpg b/data/2025/2504_09xxx/2504.09644/images/c42b96580a354e24b7c339f8f548a71f74e5ed98862c7c6e1c9f21bcca65b3bc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09ec914a9045d1fb1d41a436ee5522acf105b5dd --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/c42b96580a354e24b7c339f8f548a71f74e5ed98862c7c6e1c9f21bcca65b3bc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1afc61e5a28b29dd9c5aad7c5ae38b5f0b3ee12f9c633ecf745f3d98237e87a +size 240682 diff --git a/data/2025/2504_09xxx/2504.09644/images/cd5f50160afb79ae1a63f02722faf7aa11d19addd646282b4ecb2b9b461fe764.jpg b/data/2025/2504_09xxx/2504.09644/images/cd5f50160afb79ae1a63f02722faf7aa11d19addd646282b4ecb2b9b461fe764.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3e138961b8c64559c3d264effe3d1608d5dc6a9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/cd5f50160afb79ae1a63f02722faf7aa11d19addd646282b4ecb2b9b461fe764.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2a90eb7c9e75b95d1eefe90de7a8e171e034ff5a593961a16c1c9af9aa06432d +size 1202 diff --git a/data/2025/2504_09xxx/2504.09644/images/cee5a1dcf359a7e6af58bb4d2044cd327dc5dbd4b06e241ee9bdfc95b7d4e653.jpg b/data/2025/2504_09xxx/2504.09644/images/cee5a1dcf359a7e6af58bb4d2044cd327dc5dbd4b06e241ee9bdfc95b7d4e653.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f0aff63a93c66fb0a1c5bb151b12c1f55b519dc0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/cee5a1dcf359a7e6af58bb4d2044cd327dc5dbd4b06e241ee9bdfc95b7d4e653.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3ac5ff117c77ac35ffa3df2bd8fbf9fd10cfa48c9644d7e2bf06abe666e2c181 +size 15179 diff --git a/data/2025/2504_09xxx/2504.09644/images/dc831f471bedfc8c2e1993f561407f42a3fd0fdcd0a7da269d84584453229743.jpg b/data/2025/2504_09xxx/2504.09644/images/dc831f471bedfc8c2e1993f561407f42a3fd0fdcd0a7da269d84584453229743.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2e33cc6f3cb039afcfcf0e83519535dac2a47b2a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/dc831f471bedfc8c2e1993f561407f42a3fd0fdcd0a7da269d84584453229743.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dedb49cb8e52c0b48550f8a36e78ef14ab276bb9ceaa9b8137764307c6ecb503 +size 11800 diff --git a/data/2025/2504_09xxx/2504.09644/images/e1edabbdc7f67bf6368fb9df675d9fff192ba33bc1b09dd692c8978388be11ca.jpg b/data/2025/2504_09xxx/2504.09644/images/e1edabbdc7f67bf6368fb9df675d9fff192ba33bc1b09dd692c8978388be11ca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..feb240a669a38eca6303fb221cd41a3d8716a8a9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/e1edabbdc7f67bf6368fb9df675d9fff192ba33bc1b09dd692c8978388be11ca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8b3fe61998510743069ca820b5354bcfd650a2361c012256c91e55eadf2acf80 +size 92027 diff --git a/data/2025/2504_09xxx/2504.09644/images/e85cefccaf9ec3cf9d9cebefeda3c3348534e23088620d9eea2f0a676884b032.jpg 
b/data/2025/2504_09xxx/2504.09644/images/e85cefccaf9ec3cf9d9cebefeda3c3348534e23088620d9eea2f0a676884b032.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ebd390951f2bcce4a61b0b92307a0946c23af4e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/e85cefccaf9ec3cf9d9cebefeda3c3348534e23088620d9eea2f0a676884b032.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ce0573fb8249da910f6eec26652afeb9b14683f0ed2963a17ed82010ba5d35e0 +size 12992 diff --git a/data/2025/2504_09xxx/2504.09644/images/e874fa801bbf6562c1840a990ffd4cc51dc1bf857b6a6d160c17d709d5e5430e.jpg b/data/2025/2504_09xxx/2504.09644/images/e874fa801bbf6562c1840a990ffd4cc51dc1bf857b6a6d160c17d709d5e5430e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..836d2d7f1f31f7ad468891661f10286385ab94c5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/e874fa801bbf6562c1840a990ffd4cc51dc1bf857b6a6d160c17d709d5e5430e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2fda6ada827188c375cbe51b0cbbce1937ea6dc381174bfa80006ea92f033783 +size 22237 diff --git a/data/2025/2504_09xxx/2504.09644/images/e983c8e939641fcff8dac8f408090304ed34c7107d976403941096343d9e31d0.jpg b/data/2025/2504_09xxx/2504.09644/images/e983c8e939641fcff8dac8f408090304ed34c7107d976403941096343d9e31d0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..15b33101c64fd142fa3e7ff9b560e8fe147f06ae --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/e983c8e939641fcff8dac8f408090304ed34c7107d976403941096343d9e31d0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9e45024cd6a061553ebb7926fca45dd2452f65a4e60d445d3dc871998555b18e +size 43596 diff --git a/data/2025/2504_09xxx/2504.09644/images/f68c75612d877479ee4e3befd34d3f1b245924c81902e10e0783026023a4668c.jpg b/data/2025/2504_09xxx/2504.09644/images/f68c75612d877479ee4e3befd34d3f1b245924c81902e10e0783026023a4668c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3ea24bed5dcf20604d8fceb8371968ff8789d90d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/f68c75612d877479ee4e3befd34d3f1b245924c81902e10e0783026023a4668c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0db2a4ba6cfbe44a4dd7ded151da5b7d84408b8e2c65c03b3739ec02c1a8d38f +size 59689 diff --git a/data/2025/2504_09xxx/2504.09644/images/fb419c92310842f501e960cafb412095b70d4f48b6db1426292df9d680961416.jpg b/data/2025/2504_09xxx/2504.09644/images/fb419c92310842f501e960cafb412095b70d4f48b6db1426292df9d680961416.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a916813130cc11117c46a971b181278bb81e596 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/fb419c92310842f501e960cafb412095b70d4f48b6db1426292df9d680961416.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af3422b41c7542ebfa519b9aa0ce369ed7cc9ff8635db43a047a02236694ad22 +size 9279 diff --git a/data/2025/2504_09xxx/2504.09644/images/fbb81dfa143605a9dc5c10d0c2a74b256e88456bedcbfacde6bd86ece4eb0954.jpg b/data/2025/2504_09xxx/2504.09644/images/fbb81dfa143605a9dc5c10d0c2a74b256e88456bedcbfacde6bd86ece4eb0954.jpg new file mode 100644 index 0000000000000000000000000000000000000000..079bf0948397a2a05638fac2d64d46eb6c1c0190 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/images/fbb81dfa143605a9dc5c10d0c2a74b256e88456bedcbfacde6bd86ece4eb0954.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:20ad5535b137f34e48063a1abb21cffc0f8a17ba0ce0bcb525a55321d3acc069 +size 21282 diff --git a/data/2025/2504_09xxx/2504.09644/layout.json b/data/2025/2504_09xxx/2504.09644/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b25e9653c854c2709e994447603a630874156bf1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09644/layout.json @@ -0,0 +1,11834 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 113, + 97, + 500, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 97, + 500, + 138 + ], + "spans": [ + { + "bbox": [ + 113, + 97, + 500, + 138 + ], + "type": "text", + "content": "SegEarth-R1: Geospatial Pixel Reasoning via Large Language Model" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "spans": [ + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": "Kaiyu Li" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Zepeng Xin" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{1,\\ast}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Li Pang" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Chao Pang" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Yupeng Deng" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Jing Yao" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Guisong Xia" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Deyu Meng" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Zhi Wang" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Xiangyong Cao" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "inline_equation", + "content": "^{1,\\dagger}" + }, + { + "bbox": [ + 130, + 164, + 479, + 201 + ], + "type": "text", + "content": ", Xi'an Jiaotong University" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 281, + 216, + 329, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 216, + 329, + 228 + ], + "spans": [ + { + "bbox": [ + 281, + 216, + 329, + 228 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 140, + 239, + 471, + 470 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 239, + 471, + 470 + ], + "spans": [ + { + "bbox": [ + 140, + 239, + 471, + 470 
+ ], + "type": "text", + "content": "Remote sensing has become critical for understanding environmental dynamics, urban planning, and disaster management. However, traditional remote sensing workflows often rely on explicit segmentation or detection methods, which struggle to handle complex, implicit queries that require reasoning over spatial context, domain knowledge, and implicit user intent. Motivated by this, we introduce a new task, i.e., geospatial pixel reasoning, which allows implicit querying and reasoning and generates the mask of the target region. To advance this task, we construct and release the first large-scale benchmark dataset called EarthReason, which comprises 5,434 manually annotated image masks with over 30,000 implicit question-answer pairs. Moreover, we propose SegEarth-R1, a simple yet effective language-guided segmentation baseline that integrates a hierarchical visual encoder, a large language model (LLM) for instruction parsing, and a tailored mask generator for spatial correlation. The design of SegEarth-R1 incorporates domain-specific adaptations, including aggressive visual token compression to handle ultra-high-resolution remote sensing images, a description projection module to fuse language and multi-scale features, and a streamlined mask prediction pipeline that directly queries description embeddings. Extensive experiments demonstrate that SegEarth-R1 achieves state-of-the-art performance on both reasoning and referring segmentation tasks, significantly outperforming traditional and LLM-based segmentation methods. Our data and code will be released at https://github.com/earth-insights/SegEarth-R1." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 492, + 192, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 492, + 192, + 504 + ], + "spans": [ + { + "bbox": [ + 105, + 492, + 192, + 504 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 516, + 506, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 516, + 506, + 659 + ], + "spans": [ + { + "bbox": [ + 104, + 516, + 506, + 659 + ], + "type": "text", + "content": "Earth observation through remote sensing has emerged as a cornerstone of modern geospatial analysis, enabling unprecedented insights into environmental dynamics, urban planning, and disaster management [56, 45]. Satellite and aerial images provide a unique vantage point for monitoring planetary-scale phenomena, ranging from deforestation patterns to coastal erosion. However, converting this raw pixel data into actionable insights requires more than traditional computer vision techniques; it demands models capable of reasoning about spatial context, domain knowledge, and implicit user intent. Conventional remote sensing workflows predominantly rely on explicit tasks, e.g., semantic segmentation and referring segmentation [44, 8, 85], which operate within fixed taxonomies and require precise user instructions. While effective for well-defined scenarios, these approaches struggle to accommodate complex, implicit queries—for example, identifying regions at elevated risk of landslides based on slope, vegetation cover, and proximity to infrastructure. Such tasks limit implicit reasoning over heterogeneous spatial patterns, object relationships, and environmental metadata, exceeding the capabilities of standard segmentation or detection pipelines." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 663, + 506, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 663, + 506, + 687 + ], + "spans": [ + { + "bbox": [ + 104, + 663, + 506, + 687 + ], + "type": "text", + "content": "Motivated by this, we introduce a new task, i.e., geospatial pixel reasoning, which allows implicit querying and reasoning and generates the mask of the target region. To enable research in this task," + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "spans": [ + { + "bbox": [ + 14, + 208, + 37, + 561 + ], + "type": "text", + "content": "arXiv:2504.09644v1 [cs.CV] 13 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 693, + 193, + 703 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 693, + 193, + 703 + ], + "spans": [ + { + "bbox": [ + 116, + 693, + 193, + 703 + ], + "type": "text", + "content": "*Equal contribution" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 118, + 704, + 325, + 715 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 704, + 325, + 715 + ], + "spans": [ + { + "bbox": [ + 118, + 704, + 325, + 715 + ], + "type": "text", + "content": "†Corresponding author: caoxiangyong@mail.xjtu.edu.cn" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 731, + 193, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 193, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 193, + 742 + ], + "type": "text", + "content": "Preprint. Under review." + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 75, + 196, + 168 + ], + "blocks": [ + { + "bbox": [ + 111, + 75, + 196, + 168 + ], + "lines": [ + { + "bbox": [ + 111, + 75, + 196, + 168 + ], + "spans": [ + { + "bbox": [ + 111, + 75, + 196, + 168 + ], + "type": "image", + "image_path": "e85cefccaf9ec3cf9d9cebefeda3c3348534e23088620d9eea2f0a676884b032.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 111, + 169, + 197, + 261 + ], + "blocks": [ + { + "bbox": [ + 111, + 169, + 197, + 261 + ], + "lines": [ + { + "bbox": [ + 111, + 169, + 197, + 261 + ], + "spans": [ + { + "bbox": [ + 111, + 169, + 197, + 261 + ], + "type": "image", + "image_path": "546e8773c1096cb376d642a5780c930b2168aa5bb02a7d60ca5a69f2fe46b2bf.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 119, + 261, + 186, + 266 + ], + "lines": [ + { + "bbox": [ + 119, + 261, + 186, + 266 + ], + "spans": [ + { + "bbox": [ + 119, + 261, + 186, + 266 + ], + "type": "text", + "content": "A tennis court on the far left" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 277, + 504, + 317 + ], + "lines": [ + { + "bbox": [ + 104, + 277, + 504, + 317 + ], + "spans": [ + { + "bbox": [ + 104, + 277, + 504, + 317 + ], + "type": "text", + "content": "Figure 1: Comparison of semantic segmentation, referring segmentation and geospatial pixel inference. (left) Samples from the LoveDA [67] and RRSIS-D [40] datasets. (right) Samples from the EarthReason dataset. 
Previous tasks are limited by fixed taxonomies and explicit instructions, while geospatial pixel reasoning supports complex implicit instructions and requires the reasoning capability of the model." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 206, + 89, + 498, + 233 + ], + "blocks": [ + { + "bbox": [ + 298, + 76, + 410, + 87 + ], + "lines": [ + { + "bbox": [ + 298, + 76, + 410, + 87 + ], + "spans": [ + { + "bbox": [ + 298, + 76, + 410, + 87 + ], + "type": "text", + "content": "Geospatial Pixel Reasoning" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 206, + 89, + 498, + 233 + ], + "lines": [ + { + "bbox": [ + 206, + 89, + 498, + 233 + ], + "spans": [ + { + "bbox": [ + 206, + 89, + 498, + 233 + ], + "type": "image", + "image_path": "e1edabbdc7f67bf6368fb9df675d9fff192ba33bc1b09dd692c8978388be11ca.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 236, + 310, + 265 + ], + "lines": [ + { + "bbox": [ + 209, + 236, + 310, + 265 + ], + "spans": [ + { + "bbox": [ + 209, + 236, + 310, + 265 + ], + "type": "text", + "content": "USER: When there is an urgent necessity for emergency medical services, what designated location facilitates rapid air evacuations for patients?" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 333, + 236, + 493, + 265 + ], + "lines": [ + { + "bbox": [ + 333, + 236, + 493, + 265 + ], + "spans": [ + { + "bbox": [ + 333, + 236, + 493, + 265 + ], + "type": "text", + "content": "ASSISTANT: Perched atop the medical complex, the airport helipad facilitates swift helicopter evacuations for patients in critical condition. This strategic location offers immediate access to vital healthcare services, ensuring timely assistance in emergencies." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 334, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 334, + 504, + 411 + ], + "spans": [ + { + "bbox": [ + 104, + 334, + 504, + 411 + ], + "type": "text", + "content": "we build and release the first large-scale benchmark dataset, called EarthReason, which contains 5,434 manually annotated remote sensing image-mask pairs drawn from diverse classification sources, covering 28 scene categories at spatial resolutions ranging from " + }, + { + "bbox": [ + 104, + 334, + 504, + 411 + ], + "type": "inline_equation", + "content": "0.5\\mathrm{m}" + }, + { + "bbox": [ + 104, + 334, + 504, + 411 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 334, + 504, + 411 + ], + "type": "inline_equation", + "content": "153\\mathrm{m}" + }, + { + "bbox": [ + 104, + 334, + 504, + 411 + ], + "type": "text", + "content": ". Each image is paired with multiple implicit reasoning questions that require the model to infer target masks based on contextual and domain-specific knowledge, rather than explicit object names. In addition, by incorporating empty target cases and varying spatial scales, EarthReason pushes models to generalize across complex real-world remote sensing scenarios." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 416, + 504, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 416, + 504, + 536 + ], + "spans": [ + { + "bbox": [ + 104, + 416, + 504, + 536 + ], + "type": "text", + "content": "Recent progress in multimodal large language models (MLLMs) has demonstrated impressive performance in natural image domains, where models like LISA [26] and PixelLM [55] leverage large language models (LLMs) [62, 7, 79] to interpret rich textual prompts and generate pixel-level outputs. These frameworks excel at tasks such as reasoning segmentation [26], where the target mask is not directly specified but must be inferred from nuanced language cues. Unfortunately, directly transferring these methods to geospatial pixel reasoning is non-trivial since remote sensing images present extreme scale variation, densely packed small-scale objects and ultra-high resolution that violate assumptions of natural images. Moreover, different from natural images, remote sensing queries often require spatial correlations. For instance, identifying \"informal settlements\" relies on detecting roof material irregularities, road network fragmentation, and spatial adjacency to legal land-use zones." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 541, + 504, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 541, + 504, + 640 + ], + "spans": [ + { + "bbox": [ + 104, + 541, + 504, + 640 + ], + "type": "text", + "content": "To address these challenges, we present SegEarth-R1, a simple yet effective language-guided segmentation model that integrates a hierarchical visual encoder, an LLM for instruction parsing, and a tailored mask generator designed for spatial correlation. Further, some components are also designed to adapt to the characteristics of remote sensing images. Specifically, we propose the aggressive visual token compression to handle ultra-high-resolution images, a description projection module to fuse language and multi-scale features, and a streamlined mask prediction pipeline that directly queries description embeddings. Despite its architectural simplicity, SegEarth-R1 achieves advanced performance on EarthReason and referring segmentation datasets, significantly outperforming both traditional and LLM-based segmentation methods." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 646, + 289, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 646, + 289, + 657 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 289, + 657 + ], + "type": "text", + "content": "In summary, our contributions are as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 669, + 504, + 723 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 104, + 669, + 504, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 669, + 504, + 691 + ], + "spans": [ + { + "bbox": [ + 104, + 669, + 504, + 691 + ], + "type": "text", + "content": "- We introduce the geospatial pixel reasoning task, which requires models to infer segmentation masks from implicit natural language queries by reasoning over spatial context and domain knowledge." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 700, + 504, + 723 + ], + "type": "text", + "content": "- We build and release the first large-scale benchmark with 5,434 image-mask pairs, 28 categories, and over 30,000 implicit question-answer pairs, fostering research in geospatial pixel reasoning." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 131 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "- We propose an LLM-based segmentation model, SegEarth-R1, which incorporates new segmentation capabilities in remote sensing, containing several domain-specific designs." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 109, + 504, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 109, + 504, + 131 + ], + "spans": [ + { + "bbox": [ + 104, + 109, + 504, + 131 + ], + "type": "text", + "content": "- Extensive experiments show that SegEarth-R1 achieves state-of-the-art performance on reasoning and referring segmentation tasks, compared to traditional methods and other LLM-based methods." + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 105, + 158, + 197, + 170 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 158, + 197, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 158, + 197, + 170 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 189, + 233, + 201 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 189, + 233, + 201 + ], + "spans": [ + { + "bbox": [ + 105, + 189, + 233, + 201 + ], + "type": "text", + "content": "2.1 Referring Segmentation" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 213, + 506, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 213, + 506, + 434 + ], + "spans": [ + { + "bbox": [ + 104, + 213, + 506, + 434 + ], + "type": "text", + "content": "Referring segmentation aims to segment targets in an image based on natural language descriptions, requiring precise alignment between linguistic expressions and visual content. Early approaches adopted CNN-RNN/LSTM frameworks [18, 36, 32, 47, 60, 21] to extract visual features and encode textual queries, respectively. However, these methods struggled with complex expressions due to limited local receptive fields and insufficient cross-modal interaction [24]. To address these limitations, attention mechanisms [63] emerged as a pivotal technique [10, 81, 72, 19, 77, 49, 74, 58]. VLT [10] dynamically generates adaptive query vectors based on image-text interactions, enabling precise localization through cross-modal attention. 
LAVT [81] further advances this paradigm by integrating hierarchical visual-linguistic fusion within a Swin Transformer [43] backbone, where pixel-word attention refines multiscale features to achieve fine-grained semantic alignment. In remote sensing, specifying segmentation for certain instances can improve interpretation efficiency and user interactivity. Recently, Yuan et al. [85] introduced referring segmentation into satellite images for the first time. Subsequently, following the LAVT [81] architecture, RMSIN [40] also incorporated adaptive rotated convolutions to address scale and orientation variations. FIANet [28] and CroBIM [12] introduced elaborate cross-modal interactions for feature alignment. RSSep [17] reformulated referring segmentation as a sequence-to-sequence task, predicting polygonal boundaries to handle scale variations and blurred edges [38]. However, existing methods effectively follow explicit instructions for target segmentation but lack implicit intent reasoning. In this paper, the proposed geospatial pixel reasoning task advances beyond referring segmentation by employing LLMs' reasoning capabilities to interpret subtle instructions and accurately segment desired targets." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 456, + 241, + 468 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 456, + 241, + 468 + ], + "spans": [ + { + "bbox": [ + 105, + 456, + 241, + 468 + ], + "type": "text", + "content": "2.2 LLM-based Segmentation" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 481, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 481, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 481, + 506, + 723 + ], + "type": "text", + "content": "Recent advances in LLMs have significantly expanded their capabilities to integrate pixel-level segmentation with language reasoning [76, 68, 73, 2, 61, 88, 84, 16]. For instance, Florence-2 [76] unified text, detection, and segmentation through a sequence-to-sequence framework with task instructions. To address the complexity of real-world segmentation scenarios, some works focus on architectural specialization and instruction-aware adaptation. LISA [26, 80] established the paradigm by introducing a [SEG] token to connect LLMs with segmentation decoders like SAM [25], enabling language-guided mask prediction. Subsequent studies enhanced this paradigm: GSVA [75] introduced shared-weight [SEG] tokens and [REJ] tokens for multi-target and empty-target handling [35, 55, 90], while GLaMM [53] achieved pixel-grounded conversational capabilities through holistic segmentation [94]. Parallel efforts focused on architectural unification - PSALM [92] established a flexible input schema for multi-task segmentation, and OMG-LLaVA [89] combined universal segmentation backbones with LLMs for pixel-level reasoning. Video understanding extensions emerged through VISA [78] and InstructSeg [71], which integrated temporal reasoning. Notably, Text4Seg [27] redefined segmentation as a text generation problem using semantic descriptors, eliminating the need for an additional decoder. In remote sensing, benefiting from the above paradigms [26, 27], some unified models such as RSUniVLM [42], GeoGround [95] and GeoPix [50] are equipped with segmentation capabilities. Although based on LLM, these models focus only on explicit text-guided segmentation. 
Further, GeoPixel [57] introduced grounded conversation generation [53] to remote sensing, but it still does not provide reasoning capability. Our SegEarth-R1 also follows the LLM-based segmentation paradigm, but is different from previous methods. Specifically, SegEarth-R1 is the first work to support reasoning about the target region from implicit queries, and its components are specifically designed for the challenges in remote sensing." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 112, + 110, + 501, + 198 + ], + "blocks": [ + { + "bbox": [ + 104, + 70, + 504, + 103 + ], + "lines": [ + { + "bbox": [ + 104, + 70, + 504, + 103 + ], + "spans": [ + { + "bbox": [ + 104, + 70, + 504, + 103 + ], + "type": "text", + "content": "Table 1: Comparison between EarthReason and other related datasets. The gray rendering denotes the natural image dataset. \"Seg\", \"Det\", \"VG\", \"Cls\" denote segmentation, detection, visual grounding and classification datasets, respectively." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 112, + 110, + 501, + 198 + ], + "lines": [ + { + "bbox": [ + 112, + 110, + 501, + 198 + ], + "spans": [ + { + "bbox": [ + 112, + 110, + 501, + 198 + ], + "type": "table", + "html": "
<tr><th>Dataset</th><th>Mask Label</th><th>Reasoning Query</th><th>Spatial resolution</th><th>Image Size</th><th>Image Num</th><th>Image Source</th><th>Class Num</th></tr>
<tr><td>ReasonSeg [26]</td><td>✓</td><td>✓</td><td>-</td><td>-</td><td>1,218</td><td>OpenImages (Seg) & ScanNetv2 (Seg)</td><td>-</td></tr>
<tr><td>LLM-Seg40K [65]</td><td>✓</td><td>✓</td><td>-</td><td>-</td><td>14,000</td><td>LVIS (Seg) & EgoObjects (Seg)</td><td>-</td></tr>
<tr><td>EarthVQA [66]</td><td>X</td><td>✓</td><td>0.3m</td><td>1024²</td><td>6,000</td><td>LoveDA (Seg)</td><td>14</td></tr>
<tr><td>RefSegRS [85]</td><td>✓</td><td>X</td><td>0.5m-30m</td><td>800²</td><td>4,420</td><td>SkyScapes (Seg)</td><td>14</td></tr>
<tr><td>RRSIS-D [40]</td><td>✓</td><td>X</td><td>0.13m</td><td>512²</td><td>17,402</td><td>RSVGD (VG) & DIOR (OD)</td><td>20</td></tr>
<tr><td>RISBench [12]</td><td>✓</td><td>X</td><td>0.1m-30m</td><td>512²</td><td>52,472</td><td>DOTAv2 (OD) & DIOR (OD)</td><td>26</td></tr>
<tr><td>EarthReason</td><td>✓</td><td>✓</td><td>0.5m-153m</td><td>1232-7617²</td><td>5,434</td><td>AID (Cls) & fMoW (Cls)</td><td>28</td></tr>
", + "image_path": "07ead8f5007da4af1336a18007e5e0291b9497abe37e8cec54c29f8a7d7560f2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 217, + 448, + 230 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 217, + 448, + 230 + ], + "spans": [ + { + "bbox": [ + 104, + 217, + 448, + 230 + ], + "type": "text", + "content": "3 Benchmark Geospatial Pixel Reasoning Dataset—EarthReason" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 242, + 275, + 254 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 242, + 275, + 254 + ], + "spans": [ + { + "bbox": [ + 105, + 242, + 275, + 254 + ], + "type": "text", + "content": "3.1 Comparison with Related Dataset" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 262, + 506, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 262, + 506, + 471 + ], + "spans": [ + { + "bbox": [ + 104, + 262, + 506, + 471 + ], + "type": "text", + "content": "We analyze three types of tasks and datasets related to geospatial pixel reasoning, i.e., natural image reasoning segmentation, remote sensing visual question answering (VQA), and remote sensing referring segmentation, as shown in Table 1. RefSegRS [85] and RRSIS-D [40] provide early benchmarks with image-text-mask triplets. RISBench [12], the largest RRSIS dataset to date, introduced 52,472 triplets with oriented bounding boxes and pixel-level masks generated via a semi-automatic pipeline. These datasets address the limitations of earlier text-focused datasets (e.g., RSICD [46], EarthVQA [66], etc.) and enable comprehensive evaluation of multimodal models. Compared to the previous referring segmentation datasets, our EarthReason datasets has the following features: (1) The mask labels in EarthReason are not explicitly specified by the query, but require further reasoning to determine the target, which challenges the model's reasoning ability. (2) EarthReason uses a more raw data source. The previous related datasets directly transform existing segmentation datasets [1, 67] or SAM-processed detection datasets [86, 31, 11], while our EarthReason uses images from classification datasets [44, 8] and we manually annotate them. This allows EarthReason to provide more data gain when it comes to co-training of unified segmentation tasks. (3) EarthReason has more diverse spatial resolutions and image sizes, which are conducive to solving the object scale spanning problem inherent in remote sensing images [56]. Compared to the first natural image reasoning segmentation dataset, ReasonSeg, EarthReason contains " + }, + { + "bbox": [ + 104, + 262, + 506, + 471 + ], + "type": "inline_equation", + "content": "4.46 \\times" + }, + { + "bbox": [ + 104, + 262, + 506, + 471 + ], + "type": "text", + "content": " more data than it. Therefore, we believe that EarthReason, as the first geospatial pixel reasoning dataset in the remote sensing area, is capable of performing initial explorations of this task." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 483, + 251, + 496 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 483, + 251, + 496 + ], + "spans": [ + { + "bbox": [ + 105, + 483, + 251, + 496 + ], + "type": "text", + "content": "3.2 Dataset Generation Pipeline" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 504, + 504, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 504, + 504, + 527 + ], + "spans": [ + { + "bbox": [ + 104, + 504, + 504, + 527 + ], + "type": "text", + "content": "Our benchmark dataset EarthReason is generated according to the following three steps, i.e., image collection, question-answer pair generation, and object mask labeling." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 532, + 506, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 532, + 506, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 532, + 506, + 631 + ], + "type": "text", + "content": "Image Collection. As mentioned above, to avoid potential data leakage in the future construction of unified segmentation models for remote sensing, we collect images from existing classification data. Although this increases the annotation cost, it also motivates more diverse scenes. Specifically, we first select the 28 categories that are more suitable for reasoning in the Million-AID [44] dataset, and sample about 200 images for each category. Then, we find that the actual geographic range contained in Million-AID's images is limited. Thus, we also collect 800 images in the fMoW [8] dataset to enhance the model's reasoning ability in complex scenes. Further, to alleviate the factitious illusion issue [51], we add an extra 200 empty target images (i.e., the implied target is not in the image). Finally, some low-quality images are eliminated, and we obtain a total of 5,434 images." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 635, + 504, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 635, + 504, + 702 + ], + "spans": [ + { + "bbox": [ + 104, + 635, + 504, + 702 + ], + "type": "text", + "content": "Question-Answer Pair Generation. We use GPT-4o1 to construct question-answer pairs, and given its excellent visual comprehension, we take the remote sensing image and the corresponding scene category (provided by Million-AID and fMoW) as part of the prompt to generate questions and answers that are closely related to the image. An example of such a prompt is illustrated in Appendix A.1. In addition, following [26], to make the questions and answers diverse, we adapt GPT-3.5 to rephrase the instructional questions and answers, as shown in Appendix Figure 7." 
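For readers who want to reproduce a pipeline of this shape, the loop below is only a hypothetical sketch of the GPT-4o question-answer generation step using the public OpenAI Python client; the prompt wording, file names, and JSON field names are illustrative assumptions rather than the authors' actual prompt (which is shown in their Appendix A.1), and the GPT-3.5 rephrasing pass is omitted.

```python
# Hypothetical sketch of the QA-pair generation loop (not the authors' script).
import base64
import json
from openai import OpenAI

client = OpenAI()  # assumes OPENAI_API_KEY is set in the environment

def generate_qa(image_path: str, scene_category: str) -> dict:
    """Ask GPT-4o for one implicit question-answer pair about a remote sensing image."""
    with open(image_path, "rb") as f:
        b64 = base64.b64encode(f.read()).decode()
    prompt = (
        f"This remote sensing image shows a '{scene_category}' scene. "
        "Write an implicit question whose answer requires reasoning about a "
        "target region in the image, plus a short answer naming that target. "
        'Reply as JSON: {"question": ..., "answer": ...}.'
    )
    resp = client.chat.completions.create(
        model="gpt-4o",
        messages=[{
            "role": "user",
            "content": [
                {"type": "text", "text": prompt},
                {"type": "image_url",
                 "image_url": {"url": f"data:image/jpeg;base64,{b64}"}},
            ],
        }],
    )
    return json.loads(resp.choices[0].message.content)

# e.g. generate_qa("greenhouse_001.jpg", "greenhouse")  # placeholder file name
```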
+ } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 118, + 711, + 342, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 711, + 342, + 722 + ], + "spans": [ + { + "bbox": [ + 118, + 711, + 342, + 722 + ], + "type": "text", + "content": "1https://platform.openai.com/docs/models/gpt-4o" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 107, + 73, + 504, + 205 + ], + "blocks": [ + { + "bbox": [ + 107, + 73, + 504, + 205 + ], + "lines": [ + { + "bbox": [ + 107, + 73, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 107, + 73, + 504, + 205 + ], + "type": "image", + "image_path": "f68c75612d877479ee4e3befd34d3f1b245924c81902e10e0783026023a4668c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "lines": [ + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "type": "text", + "content": "Figure 2: Overview of the proposed SegEarth-R1 architecture. Given an image " + }, + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "type": "inline_equation", + "content": "X_{v}" + }, + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "type": "text", + "content": " and a text description " + }, + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "type": "inline_equation", + "content": "X_{q}" + }, + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "type": "text", + "content": ", a hierarchical visual encoder and a proposed connector are used to extract and compress visual tokens. Then, the visual tokens " + }, + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "type": "inline_equation", + "content": "\\square" + }, + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "type": "text", + "content": " and description embeddings " + }, + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "type": "inline_equation", + "content": "\\square" + }, + { + "bbox": [ + 104, + 210, + 506, + 262 + ], + "type": "text", + "content": " are fed into an LLM for instruction interpretation and semantic correlation. Finally, description embeddings are directly mapped to the query vector and used for spatial correlation and segmentation mask generation." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 280, + 506, + 347 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 280, + 506, + 347 + ], + "spans": [ + { + "bbox": [ + 104, + 280, + 506, + 347 + ], + "type": "text", + "content": "Object Mask Labeling. Different from previous referring and reasoning segmentation datasets (which use off-the-shelf masks or bounding boxes), we annotate images from scratch. Specifically, we employ multiple experts in remote sensing and vision, assign each expert a few hundred images to annotate, and cross-validate the annotations after they are completed. For simple targets (e.g., lake), SAM-H [25] is used to assist in annotation; for complex targets (e.g., wind turbine), each point of the polygon is finely marked. A description of mask quality is provided in Appendix A.1." 
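The SAM-assisted part of this annotation step can be approximated with the public segment-anything package; the snippet below is a rough sketch of a single point-prompt interaction (checkpoint path, image name, and click coordinates are placeholders), and the manual polygon refinement used for complex targets such as wind turbines is not shown.

```python
# Rough sketch of SAM-H point-prompt assistance for simple targets (e.g., a lake).
import numpy as np
import cv2
from segment_anything import sam_model_registry, SamPredictor

sam = sam_model_registry["vit_h"](checkpoint="sam_vit_h_4b8939.pth")  # placeholder path
predictor = SamPredictor(sam)

image = cv2.cvtColor(cv2.imread("scene.jpg"), cv2.COLOR_BGR2RGB)  # placeholder image
predictor.set_image(image)

# One positive click placed by the annotator inside the target region.
point_coords = np.array([[512, 384]])
point_labels = np.array([1])
masks, scores, _ = predictor.predict(point_coords=point_coords,
                                     point_labels=point_labels,
                                     multimask_output=True)

best_mask = masks[int(np.argmax(scores))]            # (H, W) boolean mask to refine manually
cv2.imwrite("scene_mask.png", best_mask.astype(np.uint8) * 255)
```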
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 351, + 505, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 351, + 505, + 429 + ], + "spans": [ + { + "bbox": [ + 104, + 351, + 505, + 429 + ], + "type": "text", + "content": "Dataset Statistics. The EarthReason dataset is partitioned into training, validation, and testing sets, comprising 2,371, 1,135, and 1,928 images, respectively. In the training set, each image is annotated with an average of six questions and three corresponding answers. The average question length is 20.86 words, while the average answer length is 26.76 words. To assess the model's generalization capability, several semantic categories are deliberately reserved for the validation and test sets, ensuring they remain unseen during training. Additional dataset details are provided in the Appendix A.2." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 451, + 432, + 465 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 451, + 432, + 465 + ], + "spans": [ + { + "bbox": [ + 104, + 451, + 432, + 465 + ], + "type": "text", + "content": "4 Baseline Geospatial Pixel Reasoning Method—SegEarth-R1" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 480, + 504, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 480, + 504, + 568 + ], + "spans": [ + { + "bbox": [ + 104, + 480, + 504, + 568 + ], + "type": "text", + "content": "Compared with natural images, remote sensing images exhibit distinctive characteristics that demand specialized architectural designs for pixel-wise geospatial reasoning. In this work, we propose SegEarth-R1, a simple yet powerful baseline for geospatial pixel reasoning that effectively harnesses LLM capabilities while incorporating domain-specific adaptations. As illustrated in Figure 2, our architecture comprises three core parts: A visual encoder for image feature extraction, an LLM for instruction interpretation and semantic correlation, and a mask generator for spatial correlation and mask prediction. Each part incorporates critical design considerations to address the unique challenges of remote sensing images." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 589, + 253, + 600 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 589, + 253, + 600 + ], + "spans": [ + { + "bbox": [ + 105, + 589, + 253, + 600 + ], + "type": "text", + "content": "4.1 Hierarchical Visual Encoder" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 612, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 505, + 723 + ], + "type": "text", + "content": "Satellite and aerial targets present two critical challenges: (1) extreme scale variations ranging from sub-meter objects to kilometer-scale geographical formations [56], and (2) densely distributed small objects requiring high-resolution analysis [30]. Conventional ViT-based encoders adopted in MLLMs [26, 80, 25, 75] (e.g., image encoder in CLIP [52] and SAM [25, 54]) prove suboptimal due to their fixed-scale feature extraction and information compression through aggressive patch merging. To alleviate these limitations, following [92], SegEarth-R1 employs a Swin Transformer [43] backbone enhanced with progressive feature hierarchy construction. 
This architecture generates multi-scale feature maps " + }, + { + "bbox": [ + 104, + 612, + 505, + 723 + ], + "type": "inline_equation", + "content": "v_{h}, h \\in [1,4]" + }, + { + "bbox": [ + 104, + 612, + 505, + 723 + ], + "type": "text", + "content": " at " + }, + { + "bbox": [ + 104, + 612, + 505, + 723 + ], + "type": "inline_equation", + "content": "1/4, 1/8, 1/16, 1/32" + }, + { + "bbox": [ + 104, + 612, + 505, + 723 + ], + "type": "text", + "content": " of the original resolution through controlled downsampling operations, preserving high-resolution details for small objects while capturing contextual semantics at deeper layers." + } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 312, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 312, + 85 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 312, + 85 + ], + "type": "text", + "content": "4.2 Large Language Model and Input Schema" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 92, + 504, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 92, + 504, + 148 + ], + "spans": [ + { + "bbox": [ + 104, + 92, + 504, + 148 + ], + "type": "text", + "content": "SegEarth-R1 adopts the MLLM paradigm [37, 29] by jointly embedding visual tokens and textual instructions into a unified LLM input space for multimodal reasoning. Unlike natural images, remote sensing data exhibits ultra-high-resolution coverage [23, 64], posing computational challenges when processed through billion-level LLMs. Therefore, we expect to compress the visual token to alleviate the computational cost and make only simple semantic correlations in LLM." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 159, + 194, + 170 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 159, + 194, + 170 + ], + "spans": [ + { + "bbox": [ + 105, + 159, + 194, + 170 + ], + "type": "text", + "content": "4.2.1 Visual Token" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 178, + 504, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 178, + 504, + 223 + ], + "spans": [ + { + "bbox": [ + 104, + 178, + 504, + 223 + ], + "type": "text", + "content": "Redundancy Analysis. Image redundancy quantifies the proportion of compressible, non-informative data within an image. To investigate the feasibility of aggressive visual token compression for remote sensing images, we conduct a redundancy analysis from dual perspectives: pixel-level statistical redundancy and spatial structural redundancy." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 232, + 504, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 232, + 504, + 277 + ], + "spans": [ + { + "bbox": [ + 104, + 232, + 504, + 277 + ], + "type": "text", + "content": "- According to information theory [59], entropy measures the average uncertainty or information content of an image, while the maximum entropy corresponds to the idealized scenario where pixel values are uniformly distributed (i.e., no redundancy). 
Thus, from the entropy perspective, the image redundancy can be defined as [14]:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 238, + 281, + 505, + 310 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 281, + 505, + 310 + ], + "spans": [ + { + "bbox": [ + 238, + 281, + 505, + 310 + ], + "type": "interline_equation", + "content": "R _ {e} = 1 - \\frac {- \\sum_ {l = 0} ^ {L - 1} p (l) \\log_ {2} p (l)}{\\log_ {2} L}, \\tag {1}", + "image_path": "820d4bbd8c8037bc39591915e14ea1faf08f55353abad5a2259126f713034a38.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "spans": [ + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "inline_equation", + "content": "L" + }, + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "text", + "content": " denotes the number of distinct intensity levels (e.g., " + }, + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "inline_equation", + "content": "L = 256" + }, + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "text", + "content": " for an 8-bit grayscale image), and " + }, + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "inline_equation", + "content": "p(l)" + }, + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "text", + "content": " denotes the probability mass function of the pixel intensity value " + }, + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "inline_equation", + "content": "l" + }, + { + "bbox": [ + 111, + 316, + 505, + 339 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "spans": [ + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": "- Beyond pixel-level statistical redundancy, structural self-similarity reflects spatial redundancy caused by repetitive patterns (e.g., textures, geometric features). To quantify this, we leverage the Structural Similarity Index Matrix (SSIM) [70] to measure inter-patch similarity. 
For an image partitioned into " + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": " patches, the SSIM matrix " + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "inline_equation", + "content": "\\mathbf{M} \\in \\mathbb{R}^{N \\times N}" + }, + { + "bbox": [ + 104, + 348, + 504, + 392 + ], + "type": "text", + "content": " is defined as:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 180, + 398, + 505, + 427 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 398, + 505, + 427 + ], + "spans": [ + { + "bbox": [ + 180, + 398, + 505, + 427 + ], + "type": "interline_equation", + "content": "\\mathbf {M} (i, j) = \\frac {(2 \\mu_ {i} \\mu_ {j} + C _ {1}) (2 \\sigma_ {i j} + C _ {2})}{(\\mu_ {i} ^ {2} + \\mu_ {j} ^ {2} + C _ {1}) (\\sigma_ {i} ^ {2} + \\sigma_ {j} ^ {2} + C _ {2})}, \\quad \\forall i, j \\in 1, \\dots , N \\tag {2}", + "image_path": "6b0e75a6bcd18a0617f3eaf4d14243d21e995e52771d154683c5f24aade7fcc6.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "spans": [ + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "inline_equation", + "content": "\\mu_{i},\\sigma_{i}" + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "content": " denote the mean and variance of the " + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "content": " -th patch, " + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "inline_equation", + "content": "\\sigma_{ij}" + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "content": " is the covariance between patches " + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "inline_equation", + "content": "C_1,C_2" + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "content": " are stability constants. Then, the structural self-similarity redundancy " + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "inline_equation", + "content": "R_{s}" + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "content": " is derived by averaging off-diagonal elements of " + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "inline_equation", + "content": "\\mathbf{M}" + }, + { + "bbox": [ + 111, + 431, + 504, + 464 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 246, + 469, + 505, + 500 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 246, + 469, + 505, + 500 + ], + "spans": [ + { + "bbox": [ + 246, + 469, + 505, + 500 + ], + "type": "interline_equation", + "content": "R _ {s} = \\frac {1}{N (N - 1)} \\sum_ {i \\neq j} \\mathbf {M} (i, j). 
\\tag {3}", + "image_path": "3776661b246a2704a8f32afe1833b4a0a2c68b8ab637443163a5695fc0cdb152.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 508, + 258, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 508, + 258, + 629 + ], + "spans": [ + { + "bbox": [ + 104, + 508, + 258, + 629 + ], + "type": "text", + "content": "We evaluate six benchmark datasets spanning natural images (COCO [3], ADE20K [93], PASCAL [13]) and remote sensing images (LoveDA [67], DeepGlobe [9], xBD [15]) for redundancy analysis. As shown in Figure 3, our analysis reveals two critical findings: 1) Remote sensing images demonstrate " + }, + { + "bbox": [ + 104, + 508, + 258, + 629 + ], + "type": "inline_equation", + "content": "1.9\\sim 3.3\\times" + }, + { + "bbox": [ + 104, + 508, + 258, + 629 + ], + "type": "text", + "content": " higher entropic redundancy than natural images, indicating greater pixel-level" + } + ] + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 271, + 517, + 382, + 582 + ], + "blocks": [ + { + "bbox": [ + 271, + 517, + 382, + 582 + ], + "lines": [ + { + "bbox": [ + 271, + 517, + 382, + 582 + ], + "spans": [ + { + "bbox": [ + 271, + 517, + 382, + 582 + ], + "type": "image", + "image_path": "83befbd5344d600c990778748b796f239ff6028bc3f8f1dd0071afa497f815a3.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 278, + 587, + 374, + 598 + ], + "lines": [ + { + "bbox": [ + 278, + 587, + 374, + 598 + ], + "spans": [ + { + "bbox": [ + 278, + 587, + 374, + 598 + ], + "type": "text", + "content": "(a) pixel-level redundancy" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 384, + 587, + 500, + 598 + ], + "lines": [ + { + "bbox": [ + 384, + 587, + 500, + 598 + ], + "spans": [ + { + "bbox": [ + 384, + 587, + 500, + 598 + ], + "type": "text", + "content": "(b) spatial structure redundancy" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 387, + 517, + 498, + 581 + ], + "blocks": [ + { + "bbox": [ + 387, + 517, + 498, + 581 + ], + "lines": [ + { + "bbox": [ + 387, + 517, + 498, + 581 + ], + "spans": [ + { + "bbox": [ + 387, + 517, + 498, + 581 + ], + "type": "image", + "image_path": "fb419c92310842f501e960cafb412095b70d4f48b6db1426292df9d680961416.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 263, + 605, + 504, + 626 + ], + "lines": [ + { + "bbox": [ + 263, + 605, + 504, + 626 + ], + "spans": [ + { + "bbox": [ + 263, + 605, + 504, + 626 + ], + "type": "text", + "content": "Figure 3: Redundancy analysis of remote sensing datasets and natural images, and the former exhibits higher redundancy." + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 629, + 504, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 629, + 504, + 663 + ], + "spans": [ + { + "bbox": [ + 104, + 629, + 504, + 663 + ], + "type": "text", + "content": "compressibility. 2) The average self-similarity for remote sensing data exceeds natural images by " + }, + { + "bbox": [ + 104, + 629, + 504, + 663 + ], + "type": "inline_equation", + "content": "42.6\\%" + }, + { + "bbox": [ + 104, + 629, + 504, + 663 + ], + "type": "text", + "content": ", confirming the higher prevalence of repetitive textures and geometric patterns. 
This insight justifies aggressive token compression for semantic-level comprehension in remote sensing images." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 666, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 505, + 723 + ], + "type": "text", + "content": "Token Compression Connector. In modern MLLM, connectors such as Q-Former [29] and MLP [37] are designed to transform visual tokens into a multi-modal space. However, some works [4, 82] point out that Q-Former may lead to loss of vision information and is difficult to train. Therefore, in SegEarth-R1, we follow the MLP connector fashion in LLaVA [37] and use a simple but effective connector, i.e., stacked convolutional blocks and Layer Normalization (LN). Here, convolutional" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 95 + ], + "type": "text", + "content": "blocks are used for spatial down-sampling to compress the size of the feature map, and LN is used to stabilize cross-modal training. Specifically, our connector can be formulated as:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 247, + 97, + 505, + 111 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 97, + 505, + 111 + ], + "spans": [ + { + "bbox": [ + 247, + 97, + 505, + 111 + ], + "type": "interline_equation", + "content": "v _ {o u t} = \\left(\\operatorname {C o n v} \\circ L N\\right) ^ {d} \\left(v _ {4}\\right), \\tag {4}", + "image_path": "111d5a3571b67592dfe806350929861d98d9bf966abdfc8ad4b032f32e50a48f.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 112, + 488, + 124 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 112, + 488, + 124 + ], + "spans": [ + { + "bbox": [ + 104, + 112, + 488, + 124 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 112, + 488, + 124 + ], + "type": "inline_equation", + "content": "\\circ" + }, + { + "bbox": [ + 104, + 112, + 488, + 124 + ], + "type": "text", + "content": " denotes the function composition operator, and " + }, + { + "bbox": [ + 104, + 112, + 488, + 124 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 112, + 488, + 124 + ], + "type": "text", + "content": " denotes the number of stacked layers." 
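Equations (1)-(3) above can be checked numerically in a few lines of NumPy; the sketch below is not the authors' code, and the non-overlapping 32x32 patch grid used for the SSIM matrix is an assumption (the patch size is not stated in this section).

```python
import numpy as np

def entropy_redundancy(gray: np.ndarray, levels: int = 256) -> float:
    """Eq. (1): R_e = 1 - H(image) / log2(L) for an 8-bit grayscale image."""
    hist = np.bincount(gray.ravel(), minlength=levels).astype(np.float64)
    p = hist / hist.sum()
    p = p[p > 0]                                   # drop empty bins (0*log0 := 0)
    entropy = -(p * np.log2(p)).sum()
    return 1.0 - entropy / np.log2(levels)

def structural_redundancy(gray: np.ndarray, patch: int = 32,
                          c1: float = (0.01 * 255) ** 2,
                          c2: float = (0.03 * 255) ** 2) -> float:
    """Eqs. (2)-(3): mean off-diagonal SSIM over non-overlapping patches."""
    h, w = gray.shape
    patches = [gray[i:i + patch, j:j + patch].astype(np.float64)
               for i in range(0, h - patch + 1, patch)
               for j in range(0, w - patch + 1, patch)]
    n = len(patches)
    mu = np.array([p.mean() for p in patches])
    var = np.array([p.var() for p in patches])
    total, count = 0.0, 0
    for i in range(n):
        for j in range(n):
            if i == j:
                continue
            cov = ((patches[i] - mu[i]) * (patches[j] - mu[j])).mean()
            ssim = ((2 * mu[i] * mu[j] + c1) * (2 * cov + c2)) / \
                   ((mu[i] ** 2 + mu[j] ** 2 + c1) * (var[i] + var[j] + c2))
            total += ssim
            count += 1
    return total / count

# Toy usage on a random "image"; real runs would load dataset tiles instead.
img = np.random.randint(0, 256, (256, 256), dtype=np.uint8)
print(entropy_redundancy(img), structural_redundancy(img))
```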
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 134, + 206, + 146 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 134, + 206, + 146 + ], + "spans": [ + { + "bbox": [ + 105, + 134, + 206, + 146 + ], + "type": "text", + "content": "4.2.2 Text Instruction" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 153, + 506, + 220 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 153, + 506, + 220 + ], + "spans": [ + { + "bbox": [ + 104, + 153, + 506, + 220 + ], + "type": "text", + "content": "Although the instructions involved in geospatial pixel reasoning are implicit and contain more words than referring segmentation, they still maintain the same data format. Therefore, it is easy to convert them into question-answer pairs using a template like \"USER: This is an image , please doing geospatial pixel reasoning according to the following instruction: . ASSISTANT: " + }, + { + "bbox": [ + 116, + 710, + 315, + 722 + ], + "type": "text", + "content": " in text instruction." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 117 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 117 + ], + "type": "text", + "content": "Network Architecture. Unless otherwise specified, SegEarth-R1 use phi-1.5 (1.3B) [33] as the LLM, and adopt the Swin-B as the visual encoder. The token compression connector is configured with a layer number " + }, + { + "bbox": [ + 104, + 72, + 504, + 117 + ], + "type": "inline_equation", + "content": "d = 2" + }, + { + "bbox": [ + 104, + 72, + 504, + 117 + ], + "type": "text", + "content": ". The mask generator follows the Mask2Former architecture, but removes mask tokens as mentioned above." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "spans": [ + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "type": "text", + "content": "Implementation details. During training, we use bf16 precision and freeze the visual encoder. The LLM is initialized from Phi-1.5, while both the Swin-B encoder and the mask generator are initialized with pretrained weights from Mask2Former. All images are resized to " + }, + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "type": "inline_equation", + "content": "1024 \\times 1024" + }, + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "type": "text", + "content": ", maintaining the original aspect ratio by padding the shorter side. We adopt the AdamW optimizer with an initial learning rate of " + }, + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-4}" + }, + { + "bbox": [ + 104, + 121, + 506, + 200 + ], + "type": "text", + "content": ", cosine learning rate schedule, and no weight decay. A uniform batch size of 16 is used across datasets, with training steps set to 7,610 (RRSIS-D), 5,400 (RefSegRS), and 2,220 (EarthReason). All experiments are conducted on two NVIDIA A100 80GB GPUs." 
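Read literally, Eq. (4) with the d = 2 setting above amounts to a small stack of stride-2 convolution blocks applied to the 1/32-scale feature map v4; the module below is an illustrative PyTorch interpretation rather than the released implementation (the channel width, kernel size, and the GroupNorm stand-in for LN are assumptions). For a 1024x1024 input the 1/32-scale map is 32x32, so d = 0, 1, 2, 3 would yield 1024, 256, 64, and 16 visual tokens, consistent with Table 7.

```python
import torch
import torch.nn as nn

class TokenCompressionConnector(nn.Module):
    """Sketch of Eq. (4): v_out = (Conv o LN)^d (v_4), each block halving H and W."""
    def __init__(self, channels: int = 1024, d: int = 2):
        super().__init__()
        layers = []
        for _ in range(d):
            layers.append(nn.GroupNorm(1, channels))  # LayerNorm-style normalization (assumption)
            layers.append(nn.Conv2d(channels, channels, kernel_size=3, stride=2, padding=1))
        self.blocks = nn.Sequential(*layers)

    def forward(self, v4: torch.Tensor) -> torch.Tensor:
        x = self.blocks(v4)                       # (B, C, H / 2^d, W / 2^d)
        return x.flatten(2).transpose(1, 2)       # (B, num_visual_tokens, C) fed to the LLM

# 1/32-scale feature map of a 1024x1024 image with 1024 channels: (B, 1024, 32, 32)
v4 = torch.randn(1, 1024, 32, 32)
print(TokenCompressionConnector(d=2)(v4).shape)   # torch.Size([1, 64, 1024])
```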
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 223, + 280, + 234 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 223, + 280, + 234 + ], + "spans": [ + { + "bbox": [ + 105, + 223, + 280, + 234 + ], + "type": "text", + "content": "5.2 Geospatial Pixel Reasoning Results" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 110, + 277, + 382, + 338 + ], + "blocks": [ + { + "bbox": [ + 104, + 251, + 386, + 271 + ], + "lines": [ + { + "bbox": [ + 104, + 251, + 386, + 271 + ], + "spans": [ + { + "bbox": [ + 104, + 251, + 386, + 271 + ], + "type": "text", + "content": "Table 2: Geospatial pixel reasoning results among SegEarth-R1 (ours) and previous related works." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 110, + 277, + 382, + 338 + ], + "lines": [ + { + "bbox": [ + 110, + 277, + 382, + 338 + ], + "spans": [ + { + "bbox": [ + 110, + 277, + 382, + 338 + ], + "type": "table", + "html": "
<tr><th rowspan="2">Method</th><th rowspan="2">Visual Encoder</th><th rowspan="2">LLM Type</th><th colspan="2">cIoU</th><th colspan="2">gIoU</th></tr>
<tr><th>Val</th><th>Test</th><th>Val</th><th>Test</th></tr>
<tr><td>LISA [26]</td><td>CLIP-L</td><td>Vicuna-7B [7]</td><td>57.39</td><td>59.10</td><td>61.04</td><td>60.88</td></tr>
<tr><td>PixelLM [55]</td><td>CLIP-L</td><td>Vicuna-7B [7]</td><td>57.79</td><td>59.22</td><td>57.94</td><td>60.01</td></tr>
<tr><td>PSALM [92]</td><td>Swin-B</td><td>phi-1.5 (1.3B) [33]</td><td>62.03</td><td>64.61</td><td>66.61</td><td>68.30</td></tr>
<tr><td>SegEarth-R1</td><td>Swin-B</td><td>phi-1.5 (1.3B) [33]</td><td>64.13</td><td>68.25</td><td>68.60</td><td>70.75</td></tr>
", + "image_path": "626e52b1d30a91189d3148a9dff60dcfc5c62d67257be0fb854d0081f77df054.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 345, + 504, + 445 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 345, + 504, + 445 + ], + "spans": [ + { + "bbox": [ + 104, + 345, + 504, + 445 + ], + "type": "text", + "content": "LISA and PixelLM demonstrate comparable performance; however, despite leveraging larger LLM or MLLM, the quality of their predicted segmentation masks remains suboptimal. This can be primarily attributed to their reliance on CLIP as the visual encoder, which tends to diminish the representation of small-scale geospatial targets. As one of the baselines of SegEarth-R1, PSALM achieves notable improvements over LISA and PixelLM. Nevertheless, PSALM does not adequately incorporate LLM-based segmentation and the Mask2Former paradigm, and lacks considerations for overhead images. SegEarth-R1 achieves the best results on both metrics surpassing PSALM by " + }, + { + "bbox": [ + 104, + 345, + 504, + 445 + ], + "type": "inline_equation", + "content": "3.64\\%" + }, + { + "bbox": [ + 104, + 345, + 504, + 445 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 345, + 504, + 445 + ], + "type": "inline_equation", + "content": "2.45\\%" + }, + { + "bbox": [ + 104, + 345, + 504, + 445 + ], + "type": "text", + "content": " on the test set. Importantly, SegEarth-R1 uses fewer visual tokens in LLM and reduces the number of queries in the mask generator, thus providing a lower inference cost." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 392, + 247, + 506, + 345 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 247, + 506, + 345 + ], + "spans": [ + { + "bbox": [ + 392, + 247, + 506, + 345 + ], + "type": "text", + "content": "We conduct a comparative evaluation of SOTA LLM-based methods and SegEarth-R1 on the Earth-Reason dataset. As shown in Table 2, all models are trained solely on the training split of EarthReason to ensure a fair comparison." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 468, + 266, + 479 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 468, + 266, + 479 + ], + "spans": [ + { + "bbox": [ + 105, + 468, + 266, + 479 + ], + "type": "text", + "content": "5.3 Referring Segmentation Results" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 492, + 278, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 278, + 689 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 278, + 689 + ], + "type": "text", + "content": "SegEarth-R1 also supports basic explicit language-guided segmentation. As shown in Table 3, we compare its performance with existing SOTA traditional methods (not based on LLM) as well as recent LLM-based methods. Notably, prior to SegEarth-R1, LLM-based methods consistently underperformed in comparison to traditional methods on the referring segmentation task. For instance, the advanced GeoGround [95] lags behind RMSIN [40] by " + }, + { + "bbox": [ + 104, + 492, + 278, + 689 + ], + "type": "inline_equation", + "content": "3.7\\%" + }, + { + "bbox": [ + 104, + 492, + 278, + 689 + ], + "type": "text", + "content": " in terms of gIoU on the RRSIS-D dataset. 
In contrast, SegEarth-R1, as a universal LLM-based language-guided segmentation method, surpasses traditional methods on the referring segmentation task for the first time with a " + }, + { + "bbox": [ + 104, + 492, + 278, + 689 + ], + "type": "inline_equation", + "content": "2.2\\%" + }, + { + "bbox": [ + 104, + 492, + 278, + 689 + ], + "type": "text", + "content": " improvement. This result highlights the enhanced general" + } + ] + } + ], + "index": 8 + }, + { + "type": "table", + "bbox": [ + 291, + 523, + 499, + 675 + ], + "blocks": [ + { + "bbox": [ + 283, + 496, + 504, + 517 + ], + "lines": [ + { + "bbox": [ + 283, + 496, + 504, + 517 + ], + "spans": [ + { + "bbox": [ + 283, + 496, + 504, + 517 + ], + "type": "text", + "content": "Table 3: Referring segmentation results among SegEarth-R1 and previous related works on RRSIS-D dataset." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 291, + 523, + 499, + 675 + ], + "lines": [ + { + "bbox": [ + 291, + 523, + 499, + 675 + ], + "spans": [ + { + "bbox": [ + 291, + 523, + 499, + 675 + ], + "type": "table", + "html": "
<tr><th rowspan="2">Method</th><th colspan="2">P@0.5</th><th colspan="2">cIoU</th><th colspan="2">gIoU</th></tr>
<tr><th>Val</th><th>Test</th><th>Val</th><th>Test</th><th>Val</th><th>Test</th></tr>
<tr><td colspan="7">Traditional method:</td></tr>
<tr><td>RRN [32] CVPR'18</td><td>51.09</td><td>51.07</td><td>66.53</td><td>66.43</td><td>46.06</td><td>45.64</td></tr>
<tr><td>CMSA [83] CVPR'19</td><td>55.68</td><td>55.32</td><td>69.39</td><td>69.39</td><td>48.85</td><td>48.54</td></tr>
<tr><td>LSCM [22] ECCV'20</td><td>57.12</td><td>56.02</td><td>69.05</td><td>69.28</td><td>50.36</td><td>49.92</td></tr>
<tr><td>CMPC [21] CVPR'20</td><td>57.93</td><td>55.83</td><td>69.22</td><td>69.39</td><td>50.41</td><td>49.24</td></tr>
<tr><td>BRINet [20] CVPR'20</td><td>58.79</td><td>56.90</td><td>70.73</td><td>69.88</td><td>51.14</td><td>49.65</td></tr>
<tr><td>CMPC+ [39] TPAMI'20</td><td>59.19</td><td>57.65</td><td>70.14</td><td>68.64</td><td>51.41</td><td>50.24</td></tr>
<tr><td>LGCE [85] TGRS'24</td><td>68.10</td><td>67.65</td><td>76.68</td><td>76.34</td><td>60.16</td><td>59.37</td></tr>
<tr><td>RIS-DMMI [19] CVPR'23</td><td>70.40</td><td>68.74</td><td>77.01</td><td>76.20</td><td>60.72</td><td>60.12</td></tr>
<tr><td>LAVT [81] CVPR'22</td><td>69.54</td><td>69.52</td><td>77.59</td><td>77.19</td><td>61.46</td><td>61.04</td></tr>
<tr><td>RMSIN [40] CVPR'24</td><td>74.66</td><td>74.26</td><td>78.27</td><td>77.79</td><td>65.10</td><td>64.20</td></tr>
<tr><td colspan="7">LLM-based method:</td></tr>
<tr><td>LISA [26] CVPR'24</td><td>27.07</td><td>24.51</td><td>-</td><td>-</td><td>27.84</td><td>26.78</td></tr>
<tr><td>PixelLM [55] CVPR'24</td><td>33.46</td><td>28.81</td><td>-</td><td>-</td><td>33.89</td><td>31.65</td></tr>
<tr><td>NEXT-Chat [87] arXiv'23</td><td>28.97</td><td>26.37</td><td>-</td><td>-</td><td>26.98</td><td>24.98</td></tr>
<tr><td>GeoGround [95] arXiv'25</td><td>68.69</td><td>67.50</td><td>-</td><td>-</td><td>61.10</td><td>60.50</td></tr>
<tr><td>SegEarth-R1</td><td>78.62</td><td>76.96</td><td>78.92</td><td>78.01</td><td>67.56</td><td>66.40</td></tr>
", + "image_path": "0f88521154e49797916ce7004643aedd03775ca6d240ab8fe3dc4e9d7ad29321.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": "ization capability and practical potential of SegEarth-R1. On the RefSegRS dataset, the improvement of SegEarth-R1 is more significant than the previous method, with an " + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "inline_equation", + "content": "8.33\\%" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "inline_equation", + "content": "9.87\\%" + }, + { + "bbox": [ + 104, + 689, + 504, + 723 + ], + "type": "text", + "content": " improvement over RMSIN on the validation and testing sets, respectively, as listed in Table 4." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 107, + 88, + 504, + 213 + ], + "blocks": [ + { + "bbox": [ + 106, + 78, + 503, + 88 + ], + "lines": [ + { + "bbox": [ + 106, + 78, + 503, + 88 + ], + "spans": [ + { + "bbox": [ + 106, + 78, + 503, + 88 + ], + "type": "text", + "content": "Table 4: Referring segmentation results among SegEarth-R1 and previous related works on RefSegRS dataset." + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 107, + 88, + 504, + 213 + ], + "lines": [ + { + "bbox": [ + 107, + 88, + 504, + 213 + ], + "spans": [ + { + "bbox": [ + 107, + 88, + 504, + 213 + ], + "type": "table", + "html": "
<tr><th rowspan="2">Method</th><th colspan="2">P@0.5</th><th colspan="2">P@0.6</th><th colspan="2">P@0.7</th><th colspan="2">P@0.8</th><th colspan="2">P@0.9</th><th colspan="2">cIoU</th><th colspan="2">gIoU</th></tr>
<tr><th>Val</th><th>Test</th><th>Val</th><th>Test</th><th>Val</th><th>Test</th><th>Val</th><th>Test</th><th>Val</th><th>Test</th><th>Val</th><th>Test</th><th>Val</th><th>Test</th></tr>
<tr><td>BRINet [20] CVPR'20</td><td>36.86</td><td>20.72</td><td>35.53</td><td>14.26</td><td>19.93</td><td>9.87</td><td>10.66</td><td>2.98</td><td>2.84</td><td>1.14</td><td>61.59</td><td>58.22</td><td>38.73</td><td>31.51</td></tr>
<tr><td>LSCM [22] ECCV'20</td><td>56.82</td><td>31.54</td><td>41.24</td><td>20.41</td><td>21.85</td><td>9.51</td><td>12.11</td><td>5.29</td><td>2.51</td><td>0.84</td><td>62.82</td><td>61.27</td><td>40.59</td><td>35.54</td></tr>
<tr><td>CMPC [21] CVPR'20</td><td>46.09</td><td>32.36</td><td>26.45</td><td>14.14</td><td>12.76</td><td>6.55</td><td>7.42</td><td>1.76</td><td>1.39</td><td>0.22</td><td>63.55</td><td>55.39</td><td>42.08</td><td>40.63</td></tr>
<tr><td>CMSA [83] CVPR'19</td><td>39.24</td><td>28.07</td><td>38.44</td><td>20.25</td><td>20.39</td><td>12.71</td><td>11.79</td><td>5.61</td><td>1.52</td><td>0.83</td><td>65.84</td><td>64.53</td><td>43.62</td><td>41.47</td></tr>
<tr><td>RRN [32] CVPR'18</td><td>55.43</td><td>30.26</td><td>42.98</td><td>23.01</td><td>23.11</td><td>14.87</td><td>13.72</td><td>7.17</td><td>2.64</td><td>0.98</td><td>69.24</td><td>65.06</td><td>50.81</td><td>41.88</td></tr>
<tr><td>EVF-SAM [91] Arxiv'24</td><td>57.77</td><td>35.17</td><td>37.59</td><td>22.34</td><td>16.24</td><td>9.36</td><td>4.87</td><td>2.86</td><td>1.86</td><td>0.39</td><td>59.61</td><td>55.51</td><td>46.98</td><td>36.64</td></tr>
<tr><td>CMPC+ [39] TPAMI'21</td><td>56.84</td><td>49.19</td><td>37.59</td><td>28.31</td><td>20.42</td><td>15.31</td><td>10.67</td><td>8.12</td><td>2.78</td><td>0.55</td><td>70.62</td><td>66.53</td><td>47.13</td><td>43.65</td></tr>
<tr><td>CARIS [41] ACMMM'23</td><td>68.45</td><td>45.40</td><td>47.10</td><td>27.19</td><td>25.52</td><td>15.08</td><td>14.62</td><td>8.87</td><td>3.71</td><td>1.98</td><td>75.79</td><td>69.74</td><td>54.30</td><td>42.66</td></tr>
<tr><td>CRIS [69] CVPR'22</td><td>53.13</td><td>35.77</td><td>36.19</td><td>24.11</td><td>24.36</td><td>14.36</td><td>11.83</td><td>6.38</td><td>2.55</td><td>1.21</td><td>72.14</td><td>65.87</td><td>53.74</td><td>43.26</td></tr>
<tr><td>LAVT [81] CVPR'22</td><td>80.97</td><td>51.84</td><td>58.70</td><td>30.27</td><td>31.09</td><td>17.34</td><td>15.55</td><td>9.52</td><td>4.64</td><td>2.09</td><td>78.50</td><td>71.86</td><td>61.53</td><td>47.40</td></tr>
<tr><td>RIS-DMMI [19] CVPR'23</td><td>86.17</td><td>63.89</td><td>74.71</td><td>44.30</td><td>38.05</td><td>19.81</td><td>18.10</td><td>6.49</td><td>3.25</td><td>1.00</td><td>74.02</td><td>68.58</td><td>65.72</td><td>52.15</td></tr>
<tr><td>LGCE [85] TGRS'24</td><td>90.72</td><td>73.75</td><td>86.31</td><td>61.14</td><td>71.93</td><td>39.46</td><td>32.95</td><td>16.02</td><td>10.21</td><td>5.45</td><td>83.56</td><td>76.81</td><td>72.51</td><td>59.96</td></tr>
<tr><td>RMSIN [40] CVPR'24</td><td>93.97</td><td>79.20</td><td>89.33</td><td>65.99</td><td>74.25</td><td>42.98</td><td>29.70</td><td>16.51</td><td>7.89</td><td>3.25</td><td>82.41</td><td>75.72</td><td>73.84</td><td>62.58</td></tr>
<tr><td>SegEarth-R1</td><td>95.82</td><td>86.30</td><td>93.27</td><td>79.53</td><td>88.86</td><td>69.57</td><td>78.19</td><td>48.87</td><td>22.04</td><td>10.73</td><td>85.01</td><td>79.00</td><td>82.17</td><td>72.45</td></tr>
", + "image_path": "c1fae4041eba706b24a7df9c0e3dce21c67da5799853e8d45371e6e5a8da4d35.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 108, + 234, + 178, + 304 + ], + "blocks": [ + { + "bbox": [ + 108, + 234, + 178, + 304 + ], + "lines": [ + { + "bbox": [ + 108, + 234, + 178, + 304 + ], + "spans": [ + { + "bbox": [ + 108, + 234, + 178, + 304 + ], + "type": "image", + "image_path": "97c77ed6388f2c870fddee56db8381e6bba913581803ca912ab29deb5ea07b5b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 179, + 236, + 235, + 299 + ], + "lines": [ + { + "bbox": [ + 179, + 236, + 235, + 299 + ], + "spans": [ + { + "bbox": [ + 179, + 236, + 235, + 299 + ], + "type": "text", + "content": "USER: In this region, what infrastructure changes could best support an extended growing season for local agricultural crops? ASISTANT: The greenhouse creates a controlled environment that shields plants from harsh weather, harmful insects, and infectious diseases, all of which can threaten the health of the plant." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 112, + 312, + 496, + 324 + ], + "lines": [ + { + "bbox": [ + 112, + 312, + 496, + 324 + ], + "spans": [ + { + "bbox": [ + 112, + 312, + 496, + 324 + ], + "type": "text", + "content": "Figure 5: Qualitative Results of SegEarth-R1 on EarthReason. More results can be found in Appendix C." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 238, + 235, + 306, + 304 + ], + "blocks": [ + { + "bbox": [ + 238, + 235, + 306, + 304 + ], + "lines": [ + { + "bbox": [ + 238, + 235, + 306, + 304 + ], + "spans": [ + { + "bbox": [ + 238, + 235, + 306, + 304 + ], + "type": "image", + "image_path": "bb2e1961765507e81bf0f6b605b6e42bb453f5cff362f055ab7b4b35d26381c2.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 307, + 236, + 378, + 304 + ], + "lines": [ + { + "bbox": [ + 307, + 236, + 378, + 304 + ], + "spans": [ + { + "bbox": [ + 307, + 236, + 378, + 304 + ], + "type": "text", + "content": "USER: Which types of facilities would aid small boat docking in this coastal area, while also contributing positively to tourism and local fisheries? \nASSISTANT: A pier is a man-made construction that extends over the water's surface, acting as a convenient spot for boats to moor while also providing easy access for passengers to get on and off. It serves as a vital hub for the maritime industry, ensuring smooth operations and timely delivery of products." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 381, + 235, + 450, + 304 + ], + "blocks": [ + { + "bbox": [ + 381, + 235, + 450, + 304 + ], + "lines": [ + { + "bbox": [ + 381, + 235, + 450, + 304 + ], + "spans": [ + { + "bbox": [ + 381, + 235, + 450, + 304 + ], + "type": "image", + "image_path": "64c75a815a92b0c337ccdef9e0c770c2624c1b957678d0c0f48409b52639869c.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 451, + 236, + 501, + 304 + ], + "lines": [ + { + "bbox": [ + 451, + 236, + 501, + 304 + ], + "spans": [ + { + "bbox": [ + 451, + 236, + 501, + 304 + ], + "type": "text", + "content": "USER: What locations facilitate the takeoff and landing of aircraft under supervision from air traffic controllers in a regulated setting? \nASSISTANT: The airport runway is meticulously crafted to facilitate the safe ascent and descent of aircraft, ensuring that every flight begins and ends with precision." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 343, + 195, + 357 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 343, + 195, + 357 + ], + "spans": [ + { + "bbox": [ + 105, + 343, + 195, + 357 + ], + "type": "text", + "content": "5.4 Ablation Study" + } + ] + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 112, + 415, + 321, + 487 + ], + "blocks": [ + { + "bbox": [ + 104, + 368, + 328, + 409 + ], + "lines": [ + { + "bbox": [ + 104, + 368, + 328, + 409 + ], + "spans": [ + { + "bbox": [ + 104, + 368, + 328, + 409 + ], + "type": "text", + "content": "Table 5: Ablation of SegEarth-R1 components on EarthReason: query description embedding (Query D.E.), description projector " + }, + { + "bbox": [ + 104, + 368, + 328, + 409 + ], + "type": "inline_equation", + "content": "(D" + }, + { + "bbox": [ + 104, + 368, + 328, + 409 + ], + "type": "text", + "content": " -Projectile), token compression connector (T.C. Connector)." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 112, + 415, + 321, + 487 + ], + "lines": [ + { + "bbox": [ + 112, + 415, + 321, + 487 + ], + "spans": [ + { + "bbox": [ + 112, + 415, + 321, + 487 + ], + "type": "table", + "html": "
<tr><th rowspan="2">Query D.E.</th><th rowspan="2">D-Projector</th><th rowspan="2">T.C. Connector</th><th colspan="2">cIoU</th><th colspan="2">gIoU</th></tr>
<tr><th>Val</th><th>Test</th><th>Val</th><th>Test</th></tr>
<tr><td>X</td><td>X</td><td>X</td><td>62.03</td><td>64.61</td><td>66.61</td><td>68.30</td></tr>
<tr><td colspan="3">X X</td><td>63.34</td><td>66.19</td><td>67.42</td><td>69.15</td></tr>
<tr><td colspan="3">X X</td><td>63.32</td><td>66.31</td><td>67.22</td><td>69.21</td></tr>
<tr><td colspan="3">X X</td><td>63.47</td><td>65.41</td><td>68.31</td><td>69.20</td></tr>
<tr><td colspan="3">X</td><td>64.12</td><td>66.71</td><td>68.61</td><td>69.61</td></tr>
<tr><td>✓</td><td>✓</td><td>✓</td><td>64.13</td><td>68.25</td><td>68.60</td><td>70.75</td></tr>
", + "image_path": "e874fa801bbf6562c1840a990ffd4cc51dc1bf857b6a6d160c17d709d5e5430e.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 345, + 386, + 489, + 433 + ], + "blocks": [ + { + "bbox": [ + 331, + 368, + 499, + 380 + ], + "lines": [ + { + "bbox": [ + 331, + 368, + 499, + 380 + ], + "spans": [ + { + "bbox": [ + 331, + 368, + 499, + 380 + ], + "type": "text", + "content": "Table 6: Ablation of LLM type on RRSIS-D." + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 345, + 386, + 489, + 433 + ], + "lines": [ + { + "bbox": [ + 345, + 386, + 489, + 433 + ], + "spans": [ + { + "bbox": [ + 345, + 386, + 489, + 433 + ], + "type": "table", + "html": "
<tr><th rowspan="2">LLM Type</th><th colspan="2">cIoU</th><th colspan="2">gIoU</th></tr>
<tr><th>Val</th><th>Test</th><th>Val</th><th>Test</th></tr>
<tr><td>phi-1.5 (1.3B)</td><td>78.92</td><td>78.01</td><td>67.56</td><td>66.40</td></tr>
<tr><td>phi-2 (2B)</td><td>78.98</td><td>78.35</td><td>67.91</td><td>66.67</td></tr>
<tr><td>Qwen2.5 (0.5B)</td><td>78.53</td><td>77.87</td><td>67.70</td><td>66.49</td></tr>
", + "image_path": "a22e302b05e8e7f61e8118a42801d303ff0685a00d8e30275a530a2156b8560b.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_body" + } + ], + "index": 13 + }, + { + "type": "table", + "bbox": [ + 339, + 455, + 494, + 485 + ], + "blocks": [ + { + "bbox": [ + 328, + 437, + 501, + 448 + ], + "lines": [ + { + "bbox": [ + 328, + 437, + 501, + 448 + ], + "spans": [ + { + "bbox": [ + 328, + 437, + 501, + 448 + ], + "type": "text", + "content": "Table 7: Ablation of " + }, + { + "bbox": [ + 328, + 437, + 501, + 448 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 328, + 437, + 501, + 448 + ], + "type": "text", + "content": " on EarthReason Val set." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 339, + 455, + 494, + 485 + ], + "lines": [ + { + "bbox": [ + 339, + 455, + 494, + 485 + ], + "spans": [ + { + "bbox": [ + 339, + 455, + 494, + 485 + ], + "type": "table", + "html": "
<tr><th>d</th><th>#Visual Token</th><th>gIoU</th><th>d</th><th>#Visual Token</th><th>gIoU</th></tr>
<tr><td>0</td><td>1024</td><td>68.28</td><td>2</td><td>64</td><td>68.60</td></tr>
<tr><td>1</td><td>256</td><td>68.47</td><td>3</td><td>16</td><td>68.22</td></tr>
", + "image_path": "7ff79a3cf7322060499f5dd9bfc86b7f0a71adf84720a93f08a6b713b74830aa.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "table_body" + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 492, + 506, + 592 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 492, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 506, + 592 + ], + "type": "text", + "content": "Components. We conduct ablation studies on the EarthReason dataset to evaluate the effectiveness of the novel components involved in SegEarth-R1. As listed in Table 5, the first row shows the results of the PSALM baseline. Each proposed component contributes to performance enhancement, yielding improvements ranging from " + }, + { + "bbox": [ + 104, + 492, + 506, + 592 + ], + "type": "inline_equation", + "content": "0.85\\%" + }, + { + "bbox": [ + 104, + 492, + 506, + 592 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 104, + 492, + 506, + 592 + ], + "type": "inline_equation", + "content": "0.9\\%" + }, + { + "bbox": [ + 104, + 492, + 506, + 592 + ], + "type": "text", + "content": ". The T.C. Connector and Query D.E. not only enhances performance but also reduces computational overhead. Further, the proposed components can be well coupled, and when they are all activated, i.e., complete SegEarth-R1, all metrics exhibit substantial gains over the baseline, confirming the effectiveness and compatibility of the proposed design. In fact, although these components are initially designed with remote sensing scenarios in mind, their underlying principles offer transferable insights applicable to general image understanding." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 104, + 596, + 506, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 596, + 506, + 641 + ], + "spans": [ + { + "bbox": [ + 104, + 596, + 506, + 641 + ], + "type": "text", + "content": "LLM Type. Given the limited scale of the dataset, we select some small LLM for comparison, as presented in Table 6. SegEarth-R1 demonstrates consistently high performance across different LLM, indicating the robustness and architectural stability of the overall framework. Notably, with Qwen2.5 (0.5B) [79], it still achieves competitive results, indicating its potential for edge deployment." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": "Layer Number of T.C. Connector. The layer number " + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": " controls the number of visual tokens fed into the LLM. As shown in Table 7, increasing token quantity does not improve performance. This observation aligns with our earlier analysis, suggesting that appropriate compression of visual tokens is beneficial for the global understanding of a remote sensing image. In SegEarth-R1, spatial correlations between the image and the instruction are primarily handled by the mask generator, while the LLM is only responsible for relatively semantic correlations. This division of labor allows for more efficient use of computational resources without compromising performance." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 301, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 185, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 185, + 84 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 185, + 84 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 506, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 506, + 217 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 506, + 217 + ], + "type": "text", + "content": "In this paper, we introduce geospatial pixel reasoning, a new task in remote sensing that requires models to infer segmentation masks from implicit natural language queries by reasoning over spatial context and domain knowledge. To enable research in this direction, we present EarthReason, the first large-scale benchmark dataset that emphasises complex reasoning scenarios. To address the distinct challenges inherent in remote sensing, we propose SegEarth-R1, a language-guided segmentation model that integrates a hierarchical visual encoder, an LLM for instruction parsing and semantic correlation, and a tailored mask generator designed for spatial correlation. Extensive experiments validate SegEarth-R1's superiority, achieving SOTA performance on both geospatial pixel reasoning and referring segmentation tasks. This work pioneers the fusion of natural language reasoning with pixel-level geospatial analysis, offering transformative potential for applications like environmental monitoring and disaster response." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 231, + 165, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 231, + 165, + 243 + ], + "spans": [ + { + "bbox": [ + 106, + 231, + 165, + 243 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 250, + 505, + 721 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 111, + 250, + 505, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 250, + 505, + 285 + ], + "spans": [ + { + "bbox": [ + 111, + 250, + 505, + 285 + ], + "type": "text", + "content": "[1] Seyed Majid Azimi, Corentin Henry, Lars Sommer, Arne Schumann, and Eleonora Vig. Skyscapes fine-grained semantic understanding of aerial scenes. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 7393-7403, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 290, + 505, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 290, + 505, + 325 + ], + "spans": [ + { + "bbox": [ + 111, + 290, + 505, + 325 + ], + "type": "text", + "content": "[2] Lucas Beyer, Andreas Steiner, André Susano Pinto, Alexander Kolesnikov, Xiao Wang, Daniel Salz, Maxim Neumann, Ibrahim Alabdulmohsin, Michael Tschannen, Emanuele Bugliarello, et al. Paligemma: A versatile 3b vlm for transfer. arXiv preprint arXiv:2407.07726, 2024." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 331, + 505, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 331, + 505, + 365 + ], + "spans": [ + { + "bbox": [ + 111, + 331, + 505, + 365 + ], + "type": "text", + "content": "[3] Holger Caesar, Jasper Uijlings, and Vittorio Ferrari. Coco-stuff: Thing and stuff classes in context. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 1209-1218, 2018." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 372, + 505, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 372, + 505, + 406 + ], + "spans": [ + { + "bbox": [ + 111, + 372, + 505, + 406 + ], + "type": "text", + "content": "[4] Junbum Cha, Wooyoung Kang, Jonghwan Mun, and Byungseok Roh. Honeybee: Locality-enhanced projector for multimodal llm. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13817-13827, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 412, + 505, + 446 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 412, + 505, + 446 + ], + "spans": [ + { + "bbox": [ + 111, + 412, + 505, + 446 + ], + "type": "text", + "content": "[5] Bowen Cheng, Ishan Misra, Alexander G Schwing, Alexander Kirillov, and Rohit Girdhar. Masked-attention mask transformer for universal image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 1290–1299, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 453, + 505, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 453, + 505, + 486 + ], + "spans": [ + { + "bbox": [ + 111, + 453, + 505, + 486 + ], + "type": "text", + "content": "[6] Bowen Cheng, Alex Schwing, and Alexander Kirillov. Per-pixel classification is not all you need for semantic segmentation. Advances in neural information processing systems, 34:17864-17875, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 493, + 505, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 493, + 505, + 538 + ], + "spans": [ + { + "bbox": [ + 111, + 493, + 505, + 538 + ], + "type": "text", + "content": "[7] Wei-Lin Chiang, Zhuohan Li, Ziqing Lin, Ying Sheng, Zhanghao Wu, Hao Zhang, Lianmin Zheng, Siyuan Zhuang, Yonghao Zhuang, Joseph E Gonzalez, et al. Vicuna: An open-source chatbot impressing gpt-4 with " + }, + { + "bbox": [ + 111, + 493, + 505, + 538 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 111, + 493, + 505, + 538 + ], + "type": "text", + "content": " * chatgpt quality. See https://vicuna.lmsys.org (accessed 14 April 2023), 2(3):6, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 544, + 505, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 544, + 505, + 578 + ], + "spans": [ + { + "bbox": [ + 111, + 544, + 505, + 578 + ], + "type": "text", + "content": "[8] Gordon Christie, Neil Fendley, James Wilson, and Ryan Mukherjee. Functional map of the world. In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 6172-6180, 2018." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 586, + 505, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 586, + 505, + 630 + ], + "spans": [ + { + "bbox": [ + 111, + 586, + 505, + 630 + ], + "type": "text", + "content": "[9] Ilke Demir, Krzysztof Koperski, David Lindenbaum, Guan Pang, Jing Huang, Saikat Basu, Forest Hughes, Devis Tuia, and Ramesh Raskar. Deep globe 2018: A challenge to parse the earth through satellite images. In Proceedings of the IEEE conference on computer vision and pattern recognition workshops, pages 172-181, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 636, + 505, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 636, + 505, + 670 + ], + "spans": [ + { + "bbox": [ + 106, + 636, + 505, + 670 + ], + "type": "text", + "content": "[10] Henghui Ding, Chang Liu, Suchen Wang, and Xudong Jiang. Vision-language transformer and query generation for referring segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 16321-16330, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 677, + 505, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 677, + 505, + 721 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 721 + ], + "type": "text", + "content": "[11] Jian Ding, Nan Xue, Gui-Song Xia, Xiang Bai, Wen Yang, Michael Ying Yang, Serge Belongie, Jiebo Luo, Mihai Datcu, Marcello Pelillo, et al. Object detection in aerial images: A large-scale benchmark and challenges. IEEE transactions on pattern analysis and machine intelligence, 44(11):7778-7796, 2021." + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 312, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 507, + 723 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 105 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 105 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 105 + ], + "type": "text", + "content": "[12] Zhe Dong, Yuzhe Sun, Yanfeng Gu, and Tianzhu Liu. Cross-modal bidirectional interaction model for referring remote sensing image segmentation. arXiv preprint arXiv:2410.08613, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 114, + 507, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 114, + 507, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 507, + 148 + ], + "type": "text", + "content": "[13] Mark Everingham, Luc Van Gool, Christopher KI Williams, John Winn, and Andrew Zisserman. The pascal visual object classes (voc) challenge. International journal of computer vision, 88:303-338, 2010." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 156, + 504, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 156, + 504, + 178 + ], + "spans": [ + { + "bbox": [ + 107, + 156, + 504, + 178 + ], + "type": "text", + "content": "[14] Rafael C Gonzales and Paul Wintz. Digital image processing. Addison-Wesley Longman Publishing Co., Inc., 1987." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 186, + 504, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 186, + 504, + 220 + ], + "spans": [ + { + "bbox": [ + 106, + 186, + 504, + 220 + ], + "type": "text", + "content": "[15] Ritwik Gupta, Richard Hosfelt, Sandra Sajeev, Nirav Patel, Bryce Goodman, Jigar Doshi, Eric Heim, Howie Choset, and Matthew Gaston. xbd: A dataset for assessing building damage from satellite imagery. arXiv preprint arXiv:1911.09296, 2019." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 228, + 507, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 228, + 507, + 271 + ], + "spans": [ + { + "bbox": [ + 106, + 228, + 507, + 271 + ], + "type": "text", + "content": "[16] Junwen He, Yifan Wang, Lijun Wang, Huchuan Lu, Jun-Yan He, Jin-Peng Lan, Bin Luo, and Xuansong Xie. Multi-modal instruction tuned llms with fine-grained visual perception. In Proceedings of the IEEE/cvf conference on computer vision and pattern recognition, pages 13980-13990, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 280, + 504, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 280, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 106, + 280, + 504, + 324 + ], + "type": "text", + "content": "[17] Ngoc-Vuong Ho, Thinh Phan, Meredith Adkins, Chase Rainwater, Jackson Cothren, and Ngan Le. Rssep: Sequence-to-sequence model for simultaneous referring remote sensing segmentation and detection. In Proceedings of the Asian Conference on Computer Vision, pages 218-231, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 333, + 504, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 333, + 504, + 367 + ], + "spans": [ + { + "bbox": [ + 106, + 333, + 504, + 367 + ], + "type": "text", + "content": "[18] Ronghang Hu, Marcus Rohrbach, and Trevor Darrell. Segmentation from natural language expressions. In Computer Vision-ECCV 2016: 14th European Conference, Amsterdam, The Netherlands, October 11-14, 2016, Proceedings, Part I 14, pages 108-124. Springer, 2016." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 374, + 506, + 409 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 374, + 506, + 409 + ], + "spans": [ + { + "bbox": [ + 105, + 374, + 506, + 409 + ], + "type": "text", + "content": "[19] Yutao Hu, Qixiong Wang, Wenqi Shao, Enze Xie, Zhenguo Li, Jungong Han, and Ping Luo. Beyond one-to-one: Rethinking the referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 4067-4077, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 416, + 504, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 416, + 504, + 451 + ], + "spans": [ + { + "bbox": [ + 106, + 416, + 504, + 451 + ], + "type": "text", + "content": "[20] Zhiwei Hu, Guang Feng, Jiayu Sun, Lihe Zhang, and Huchuan Lu. Bi-directional relationship inferring network for referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 4424-4433, 2020." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 457, + 506, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 457, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 106, + 457, + 506, + 502 + ], + "type": "text", + "content": "[21] Shaofei Huang, Tianrui Hui, Si Liu, Guanbin Li, Yunchao Wei, Jizhong Han, Luoqi Liu, and Bo Li. Referring image segmentation via cross-modal progressive comprehension. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10488-10497, 2020." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 510, + 506, + 545 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 510, + 506, + 545 + ], + "spans": [ + { + "bbox": [ + 106, + 510, + 506, + 545 + ], + "type": "text", + "content": "[22] Tianrui Hui, Si Liu, Shaofei Huang, Guanbin Li, Sansi Yu, Faxi Zhang, and Jizhong Han. Linguistic structure guided context modeling for referring image segmentation. In European Conference on Computer Vision, pages 59-75. Springer, 2020." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 552, + 504, + 586 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 552, + 504, + 586 + ], + "spans": [ + { + "bbox": [ + 106, + 552, + 504, + 586 + ], + "type": "text", + "content": "[23] Deyi Ji, Feng Zhao, Hongtao Lu, Mingyuan Tao, and Jieping Ye. Ultra-high resolution segmentation with ultra-rich context: A novel benchmark. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 23621-23630, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 594, + 506, + 628 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 594, + 506, + 628 + ], + "spans": [ + { + "bbox": [ + 106, + 594, + 506, + 628 + ], + "type": "text", + "content": "[24] Lixia Ji, Yunlong Du, Yiping Dang, Wenzhao Gao, and Han Zhang. A survey of methods for addressing the challenges of referring image segmentation. Neurocomputing, 583:127599, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 635, + 506, + 679 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 635, + 506, + 679 + ], + "spans": [ + { + "bbox": [ + 106, + 635, + 506, + 679 + ], + "type": "text", + "content": "[25] Alexander Kirillov, Eric Mintun, Nikhila Ravi, Hanzi Mao, Chloe Rolland, Laura Gustafson, Tete Xiao, Spencer Whitehead, Alexander C Berg, Wan-Yen Lo, et al. Segment anything. In Proceedings of the IEEE/CVF international conference on computer vision, pages 4015-4026, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 688, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 688, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 106, + 688, + 506, + 723 + ], + "type": "text", + "content": "[26] Xin Lai, Zhuotao Tian, Yukang Chen, Yanwei Li, Yuhui Yuan, Shu Liu, and Jiaya Jia. Lisa: Reasoning segmentation via large language model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 9579-9589, 2024." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[27] Mengcheng Lan, Chaofeng Chen, Yue Zhou, Jiaxing Xu, Yiping Ke, Xinjiang Wang, Litong Feng, and Wayne Zhang. Text4seg: Reimagining image segmentation as text generation. arXiv preprint arXiv:2410.09855, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 505, + 148 + ], + "type": "text", + "content": "[28] Sen Lei, Xinyu Xiao, Tianlin Zhang, Heng-Chao Li, Zhenwei Shi, and Qing Zhu. Exploring fine-grained image-text alignment for referring remote sensing image segmentation. IEEE Transactions on Geoscience and Remote Sensing, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 154, + 504, + 189 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 154, + 504, + 189 + ], + "spans": [ + { + "bbox": [ + 107, + 154, + 504, + 189 + ], + "type": "text", + "content": "[29] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. Blip-2: Bootstrapping language-image pre-training with frozen image encoders and large language models. In International conference on machine learning, pages 19730–19742. PMLR, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 194, + 506, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 506, + 230 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 506, + 230 + ], + "type": "text", + "content": "[30] Kaiyu Li, Ruixun Liu, Xiangyong Cao, Xueru Bai, Feng Zhou, Deyu Meng, and Zhi Wang. Seearth-ov: Towards training-free open-vocabulary segmentation for remote sensing images. arXiv preprint arXiv:2410.01768, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 236, + 506, + 270 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 236, + 506, + 270 + ], + "spans": [ + { + "bbox": [ + 107, + 236, + 506, + 270 + ], + "type": "text", + "content": "[31] Ke Li, Gang Wan, Gong Cheng, Liqui Meng, and Junwei Han. Object detection in optical remote sensing images: A survey and a new benchmark. ISPRS journal of photogrammetry and remote sensing, 159:296-307, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 277, + 506, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 277, + 506, + 312 + ], + "spans": [ + { + "bbox": [ + 107, + 277, + 506, + 312 + ], + "type": "text", + "content": "[32] Ruiyu Li, Kaican Li, Yi-Chun Kuo, Michelle Shu, Xiaojuan Qi, Xiaoyong Shen, and Jiaya Jia. Referring image segmentation via recurrent refinement networks. 
In Proceedings of the IEEE Conference on Computer Vision and Pattern Recognition, pages 5745-5753, 2018." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 318, + 506, + 352 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 318, + 506, + 352 + ], + "spans": [ + { + "bbox": [ + 107, + 318, + 506, + 352 + ], + "type": "text", + "content": "[33] Yuanzhi Li, Sébastien Bubeck, Ronen Eldan, Allie Del Giorno, Suriya Gunasekar, and Yin Tat Lee. Textbooks are all you need ii: phi-1.5 technical report. arXiv preprint arXiv:2309.05463, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 358, + 506, + 393 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 358, + 506, + 393 + ], + "spans": [ + { + "bbox": [ + 107, + 358, + 506, + 393 + ], + "type": "text", + "content": "[34] Tsung-Yi Lin, Priya Goyal, Ross Girshick, Kaiming He, and Piotr Dólar. Focal loss for dense object detection. In Proceedings of the IEEE international conference on computer vision, pages 2980-2988, 2017." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 400, + 506, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 400, + 506, + 435 + ], + "spans": [ + { + "bbox": [ + 107, + 400, + 506, + 435 + ], + "type": "text", + "content": "[35] Chang Liu, Henghui Ding, and Xudong Jiang. Gres: Generalized referring expression segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 23592-23601, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 441, + 506, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 441, + 506, + 475 + ], + "spans": [ + { + "bbox": [ + 107, + 441, + 506, + 475 + ], + "type": "text", + "content": "[36] Chenxi Liu, Zhe Lin, Xiaohui Shen, Jimei Yang, Xin Lu, and Alan Yuille. Recurrent multimodal interaction for referring image segmentation. In Proceedings of the IEEE international conference on computer vision, pages 1271-1280, 2017." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 482, + 504, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 482, + 504, + 507 + ], + "spans": [ + { + "bbox": [ + 107, + 482, + 504, + 507 + ], + "type": "text", + "content": "[37] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. Advances in neural information processing systems, 36:34892-34916, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 513, + 504, + 557 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 513, + 504, + 557 + ], + "spans": [ + { + "bbox": [ + 107, + 513, + 504, + 557 + ], + "type": "text", + "content": "[38] Jiang Liu, Hui Ding, Zhaowei Cai, Yuting Zhang, Ravi Kumar Satzoda, Vijay Mahadevan, and R Manmatha. Polyformer: Referring image segmentation as sequential polygon generation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18653-18663, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 564, + 504, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 564, + 504, + 599 + ], + "spans": [ + { + "bbox": [ + 107, + 564, + 504, + 599 + ], + "type": "text", + "content": "[39] Si Liu, Tianrui Hui, Shaofei Huang, Yunchao Wei, Bo Li, and Guanbin Li. Cross-modal progressive comprehension for referring segmentation. 
IEEE Transactions on Pattern Analysis and Machine Intelligence, 44(9):4761-4775, 2021." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 605, + 506, + 650 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 605, + 506, + 650 + ], + "spans": [ + { + "bbox": [ + 107, + 605, + 506, + 650 + ], + "type": "text", + "content": "[40] Sihan Liu, Yiwei Ma, Xiaqing Zhang, Haowei Wang, Jiayi Ji, Xiaoshuai Sun, and Rongrong Ji. Rotated multi-scale interaction network for referring remote sensing image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26658-26668, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 658, + 506, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 658, + 506, + 693 + ], + "spans": [ + { + "bbox": [ + 107, + 658, + 506, + 693 + ], + "type": "text", + "content": "[41] Sun-Ao Liu, Yiheng Zhang, Zhaofan Qiu, Hongtao Xie, Yongdong Zhang, and Ting Yao. Caris: Context-aware referring image segmentation. In Proceedings of the 31st ACM International Conference on Multimedia, pages 779-788, 2023." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 699, + 504, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 699, + 504, + 723 + ], + "type": "text", + "content": "[42] Xu Liu and Zhouhui Lian. Rsunivlm: A unified vision language model for remote sensing via granularity-oriented mixture of experts. arXiv preprint arXiv:2412.05679, 2024." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 106 + ], + "type": "text", + "content": "[43] Ze Liu, Yutong Lin, Yue Cao, Han Hu, Yixuan Wei, Zheng Zhang, Stephen Lin, and Baining Guo. Swin transformer: Hierarchical vision transformer using shifted windows. In Proceedings of the IEEE/CVF international conference on computer vision, pages 10012-10022, 2021." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 114, + 506, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 114, + 506, + 159 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 506, + 159 + ], + "type": "text", + "content": "[44] Yang Long, Gui-Song Xia, Shengyang Li, Wen Yang, Michael Ying Yang, Xiao Xiang Zhu, Liangpei Zhang, and Deren Li. On creating benchmark dataset for aerial image interpretation: Reviews, guidances, and million-aid. IEEE Journal of selected topics in applied earth observations and remote sensing, 14:4205–4230, 2021." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 106, + 166, + 504, + 201 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 166, + 504, + 201 + ], + "spans": [ + { + "bbox": [ + 106, + 166, + 504, + 201 + ], + "type": "text", + "content": "[45] Siqi Lu, Junlin Guo, James R Zimmer-Dauphinee, Jordan M Nieusma, Xiao Wang, Steven A Wernke, Yuankai Huo, et al. Vision foundation models in remote sensing: A survey. IEEE Geoscience and Remote Sensing Magazine, 2025." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 208, + 504, + 242 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 208, + 504, + 242 + ], + "spans": [ + { + "bbox": [ + 107, + 208, + 504, + 242 + ], + "type": "text", + "content": "[46] Xiaoqiang Lu, Binqiang Wang, Xiangtao Zheng, and Xuelong Li. Exploring models and data for remote sensing image caption generation. IEEE Transactions on Geoscience and Remote Sensing, 56(4):2183-2195, 2017." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 249, + 504, + 283 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 249, + 504, + 283 + ], + "spans": [ + { + "bbox": [ + 107, + 249, + 504, + 283 + ], + "type": "text", + "content": "[47] Edgar Margffoy-Tuay, Juan C Pérez, Emilio Botero, and Pablo Arbeláez. Dynamic multimodal instance segmentation guided by natural language queries. In Proceedings of the European Conference on Computer Vision (ECCV), pages 630–645, 2018." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 291, + 504, + 325 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 291, + 504, + 325 + ], + "spans": [ + { + "bbox": [ + 107, + 291, + 504, + 325 + ], + "type": "text", + "content": "[48] Fausto Miletari, Nassir Navab, and Seyed-Ahmad Ahmadi. V-net: Fully convolutional neural networks for volumetric medical image segmentation. In 2016 fourth international conference on 3D vision (3DV), pages 565-571. IEEE, 2016." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 333, + 506, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 333, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 107, + 333, + 506, + 367 + ], + "type": "text", + "content": "[49] Sayan Nag, Koustava Goswami, and Srikrishna Karanam. Safari: Adaptive sequence tr a ns f ormer for we a kly supervised r eferring expression segmentat i on. In European Conference on Computer Vision, pages 485-503. Springer, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 374, + 506, + 407 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 374, + 506, + 407 + ], + "spans": [ + { + "bbox": [ + 107, + 374, + 506, + 407 + ], + "type": "text", + "content": "[50] Ruizhe Ou, Yuan Hu, Fan Zhang, Jiaxin Chen, and Yu Liu. Geopix: Multi-modal large language model for pixel-level image understanding in remote sensing. arXiv preprint arXiv:2501.06828, 2025." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 416, + 506, + 451 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 416, + 506, + 451 + ], + "spans": [ + { + "bbox": [ + 107, + 416, + 506, + 451 + ], + "type": "text", + "content": "[51] Chao Pang, Xingxing Weng, Jiang Wu, Jiayu Li, Yi Liu, Jiaxing Sun, Weijia Li, Shuai Wang, Litong Feng, Gui-Song Xia, et al. Vhm: Versatile and honest vision language model for remote sensing image analysis. arXiv preprint arXiv:2403.20213, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 457, + 506, + 503 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 457, + 506, + 503 + ], + "spans": [ + { + "bbox": [ + 107, + 457, + 506, + 503 + ], + "type": "text", + "content": "[52] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In International conference on machine learning, pages 8748-8763. PmLR, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 510, + 504, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 510, + 504, + 555 + ], + "spans": [ + { + "bbox": [ + 107, + 510, + 504, + 555 + ], + "type": "text", + "content": "[53] Hanoona Rasheed, Muhammad Maaz, Sahal Shaji, Abdelrahman Shaker, Salman Khan, Hisham Cholakkal, Rao M Anwer, Eric Xing, Ming-Hsuan Yang, and Fahad S Khan. Glamm: Pixel grounding large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 13009-13018, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 563, + 506, + 597 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 563, + 506, + 597 + ], + "spans": [ + { + "bbox": [ + 107, + 563, + 506, + 597 + ], + "type": "text", + "content": "[54] Nikhila Ravi, Valentin Gabeur, Yuan-Ting Hu, Ronghang Hu, Chaitanya Ryali, Tengyu Ma, Haitham Khedr, Roman Rädle, Chloe Rolland, Laura Gustafson, et al. Sam 2: Segment anything in images and videos. arXiv preprint arXiv:2408.00714, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 605, + 506, + 639 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 605, + 506, + 639 + ], + "spans": [ + { + "bbox": [ + 107, + 605, + 506, + 639 + ], + "type": "text", + "content": "[55] Zhongwei Ren, Zhicheng Huang, Yunchao Wei, Yao Zhao, Dongmei Fu, Jiashi Feng, and Xiaojie Jin. Pixel reasoning with large multimodal model. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 26374-26383, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 647, + 506, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 647, + 506, + 680 + ], + "spans": [ + { + "bbox": [ + 106, + 647, + 506, + 680 + ], + "type": "text", + "content": "[56] Esther Rolf, Konstantin Klemmer, Caleb Robinson, and Hannah Kerner. Mission critical-satellite data is a distinct modality in machine learning. arXiv preprint arXiv:2402.01444, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 688, + 504, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 688, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 107, + 688, + 504, + 721 + ], + "type": "text", + "content": "[57] Akashah Shabbir, Mohammed Zumri, Mohammed Bennamoun, Fahad S Khan, and Salman Khan. Geopixel: Pixel grounding large multimodal model in remote sensing. arXiv preprint arXiv:2501.13925, 2025." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 722 + ], + "type": "list", + "angle": 0, + "index": 16, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "text", + "content": "[58] Chao Shang, Zichen Song, Heqian Qiu, Lanxiao Wang, Fanman Meng, and Hongliang Li. Prompt-driven referring image segmentation with instance contrasting. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4124-4134, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 111, + 505, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 111, + 505, + 136 + ], + "spans": [ + { + "bbox": [ + 106, + 111, + 505, + 136 + ], + "type": "text", + "content": "[59] Claude E Shannon. A mathematical theory of communication. The Bell system technical journal, 27(3):379-423, 1948." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 140, + 506, + 175 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 140, + 506, + 175 + ], + "spans": [ + { + "bbox": [ + 107, + 140, + 506, + 175 + ], + "type": "text", + "content": "[60] Hengcan Shi, Hongliang Li, Fanman Meng, and Qingbo Wu. Key-word-aware network for referring expression image segmentation. In Proceedings of the European Conference on Computer Vision (ECCV), pages 38-54, 2018." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 179, + 506, + 215 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 179, + 506, + 215 + ], + "spans": [ + { + "bbox": [ + 106, + 179, + 506, + 215 + ], + "type": "text", + "content": "[61] Andreas Steiner, André Susano Pinto, Michael Tschannen, Daniel Keysers, Xiao Wang, Yonatan Bitton, Alexey Gritsenko, Matthias Minderer, Anthony Sherbondy, Shangbang Long, et al. Paligemma 2: A family of versatile vlms for transfer. arXiv preprint arXiv:2412.03555, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 106, + 219, + 506, + 255 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 219, + 506, + 255 + ], + "spans": [ + { + "bbox": [ + 106, + 219, + 506, + 255 + ], + "type": "text", + "content": "[62] Hugo Touvron, Louis Martin, Kevin Stone, Peter Albert, Amjad Almahairi, Yasmine Babaei, Nikolay Bashlykov, Soumya Batra, Prajjwal Bhargava, Shruti Bhosale, et al. Llama 2: Open foundation and fine-tuned chat models. arXiv preprint arXiv:2307.09288, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 259, + 506, + 281 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 259, + 506, + 281 + ], + "spans": [ + { + "bbox": [ + 107, + 259, + 506, + 281 + ], + "type": "text", + "content": "[63] A Vaswani. Attention is all you need. Advances in Neural Information Processing Systems, 2017." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 106, + 288, + 506, + 332 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 288, + 506, + 332 + ], + "spans": [ + { + "bbox": [ + 106, + 288, + 506, + 332 + ], + "type": "text", + "content": "[64] Fengxiang Wang, Hongzhen Wang, Mingshuo Chen, Di Wang, Yulin Wang, Zonghao Guo, Qiang Ma, Long Lan, Wenjing Yang, Jing Zhang, et al. Xlrs-bench: Could your multimodal llms understand extremely large ultra-high-resolution remote sensing imagery? arXiv preprint arXiv:2503.23771, 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 338, + 504, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 338, + 504, + 372 + ], + "spans": [ + { + "bbox": [ + 107, + 338, + 504, + 372 + ], + "type": "text", + "content": "[65] Junchi Wang and Lei Ke. Llm-seg: Bridging image segmentation and large language model reasoning. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 1765-1774, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 378, + 506, + 422 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 378, + 506, + 422 + ], + "spans": [ + { + "bbox": [ + 107, + 378, + 506, + 422 + ], + "type": "text", + "content": "[66] Junjue Wang, Zhuo Zheng, Zihang Chen, Ailong Ma, and Yanfei Zhong. Earthvqa: Towards queryable earth via relational reasoning-based remote sensing visual question answering. In Proceedings of the AAAI Conference on Artificial Intelligence, volume 38, pages 5481-5489, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 429, + 506, + 462 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 429, + 506, + 462 + ], + "spans": [ + { + "bbox": [ + 107, + 429, + 506, + 462 + ], + "type": "text", + "content": "[67] Junjue Wang, Zhuo Zheng, Ailong Ma, Xiaoyan Lu, and Yanfei Zhong. Loveda: A remote sensing land-cover dataset for domain adaptive semantic segmentation. arXiv preprint arXiv:2110.08733, 2021." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 468, + 506, + 512 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 468, + 506, + 512 + ], + "spans": [ + { + "bbox": [ + 107, + 468, + 506, + 512 + ], + "type": "text", + "content": "[68] Wenhai Wang, Zhe Chen, Xiaokang Chen, Jiannan Wu, Xizhou Zhu, Gang Zeng, Ping Luo, Tong Lu, Jie Zhou, Yu Qiao, et al. Visionllm: Large language model is also an open-ended decoder for vision-centric tasks. Advances in Neural Information Processing Systems, 36:61501-61513, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 519, + 504, + 553 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 519, + 504, + 553 + ], + "spans": [ + { + "bbox": [ + 107, + 519, + 504, + 553 + ], + "type": "text", + "content": "[69] Zhaoqing Wang, Yu Lu, Qiang Li, Xunqiang Tao, Yandong Guo, Mingming Gong, and Tongliang Liu. Cris: Clip-driven referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 11686-11695, 2022." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 559, + 506, + 592 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 559, + 506, + 592 + ], + "spans": [ + { + "bbox": [ + 107, + 559, + 506, + 592 + ], + "type": "text", + "content": "[70] Zhou Wang, Alan C Bovik, Hamid R Sheikh, and Eero P Simoncelli. 
Image quality assessment: from error visibility to structural similarity. IEEE transactions on image processing, 13(4):600-612, 2004." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 598, + 506, + 632 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 598, + 506, + 632 + ], + "spans": [ + { + "bbox": [ + 107, + 598, + 506, + 632 + ], + "type": "text", + "content": "[71] Cong Wei, Yujie Zhong, Haoxian Tan, Yingsen Zeng, Yong Liu, Zheng Zhao, and Yujiu Yang. Instructseg: Unifying instructed visual segmentation with multi-modal large language models. arXiv preprint arXiv:2412.14006, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 638, + 504, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 638, + 504, + 672 + ], + "spans": [ + { + "bbox": [ + 107, + 638, + 504, + 672 + ], + "type": "text", + "content": "[72] Jiannan Wu, Yi Jiang, Peize Sun, Zehuan Yuan, and Ping Luo. Language as queries for referring video object segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 4974-4984, 2022." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 677, + 504, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 677, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 107, + 677, + 504, + 722 + ], + "type": "text", + "content": "[73] Jiannan Wu, Muyan Zhong, Sen Xing, Zeqiang Lai, Zhaoyang Liu, Zhe Chen, Wenhai Wang, Xizhou Zhu, Lewei Lu, Tong Lu, et al. Visionlm v2: An end-to-end generalist multimodal large language model for hundreds of vision-language tasks. Advances in Neural Information Processing Systems, 37:69925-69975, 2025." + } + ] + } + ], + "index": 15 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 723 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 505, + 95 + ], + "type": "text", + "content": "[74] Jianzong Wu, Xiangtai Li, Xia Li, Henghui Ding, Yunhai Tong, and Dacheng Tao. Towards robust referring image segmentation. IEEE Transactions on Image Processing, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 100, + 506, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 100, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 106, + 100, + 506, + 136 + ], + "type": "text", + "content": "[75] Zhuofan Xia, Dongchen Han, Yizeng Han, Xuran Pan, Shiji Song, and Gao Huang. Gsva: Generalized segmentation via multimodal large language models. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 3858-3869, 2024." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 140, + 506, + 176 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 140, + 506, + 176 + ], + "spans": [ + { + "bbox": [ + 107, + 140, + 506, + 176 + ], + "type": "text", + "content": "[76] Bin Xiao, Haiping Wu, Weijian Xu, Xiyang Dai, Houdong Hu, Yumao Lu, Michael Zeng, Ce Liu, and Lu Yuan. Florence-2: Advancing a unified representation for a variety of vision tasks (2023). URL https://arxiv.org/abs/2311.06242, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 180, + 506, + 225 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 180, + 506, + 225 + ], + "spans": [ + { + "bbox": [ + 106, + 180, + 506, + 225 + ], + "type": "text", + "content": "[77] Zunnan Xu, Zhihong Chen, Yong Zhang, Yibing Song, Xiang Wan, and Guanbin Li. Bridging vision and language encoders: Parameter-efficient tuning for referring image segmentation. In Proceedings of the IEEE/CVF International Conference on Computer Vision, pages 17503-17512, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 232, + 504, + 266 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 232, + 504, + 266 + ], + "spans": [ + { + "bbox": [ + 107, + 232, + 504, + 266 + ], + "type": "text", + "content": "[78] Cilin Yan, Haochen Wang, Shilin Yan, Xiaolong Jiang, Yao Hu, Guoliang Kang, Weidi Xie, and Efstratios Gavves. Visa: Reasoning video object segmentation via large language models. In European Conference on Computer Vision, pages 98-115. Springer, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 271, + 504, + 305 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 271, + 504, + 305 + ], + "spans": [ + { + "bbox": [ + 107, + 271, + 504, + 305 + ], + "type": "text", + "content": "[79] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 311, + 506, + 346 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 311, + 506, + 346 + ], + "spans": [ + { + "bbox": [ + 107, + 311, + 506, + 346 + ], + "type": "text", + "content": "[80] Senqiao Yang, Tianyuan Qu, Xin Lai, Zhuotao Tian, Bohao Peng, Shu Liu, and Jiaya Jia. Lisa++: An improved baseline for reasoning segmentation with large language model. arXiv preprint arXiv:2312.17240, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 107, + 351, + 506, + 385 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 351, + 506, + 385 + ], + "spans": [ + { + "bbox": [ + 107, + 351, + 506, + 385 + ], + "type": "text", + "content": "[81] Zhao Yang, Jiaqi Wang, Yansong Tang, Kai Chen, Hengshuang Zhao, and Philip HS Torr. Lavt: Language-aware vision transformer for referring image segmentation. In Proceedings of the IEEE/CVF Conference on Computer Vision and Pattern Recognition, pages 18155–18165, 2022." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 391, + 506, + 425 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 391, + 506, + 425 + ], + "spans": [ + { + "bbox": [ + 107, + 391, + 506, + 425 + ], + "type": "text", + "content": "[82] Linli Yao, Lei Li, Shuhuai Ren, Lean Wang, Yuanxin Liu, Xu Sun, and Lu Hou. 
Deco: Decoupling token compression from semantic abstraction in multimodal large language models. arXiv preprint arXiv:2405.20985, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 107, + 430, + 504, + 465 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 430, + 504, + 465 + ], + "spans": [ + { + "bbox": [ + 107, + 430, + 504, + 465 + ], + "type": "text", + "content": "[83] Linwei Ye, Mrigank Rochan, Zhi Liu, and Yang Wang. Cross-modal self-attention network for referring image segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 10502–10511, 2019." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 107, + 471, + 504, + 505 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 471, + 504, + 505 + ], + "spans": [ + { + "bbox": [ + 107, + 471, + 504, + 505 + ], + "type": "text", + "content": "[84] Haobo Yuan, Xiangtai Li, Tao Zhang, Zilong Huang, Shilin Xu, Shunping Ji, Yunhai Tong, Lu Qi, Jiashi Feng, and Ming-Hsuan Yang. Sa2va: Marrying sam2 with llava for dense grounded understanding of images and videos. arXiv preprint arXiv:2501.04001, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 107, + 510, + 504, + 535 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 510, + 504, + 535 + ], + "spans": [ + { + "bbox": [ + 107, + 510, + 504, + 535 + ], + "type": "text", + "content": "[85] Zhenghang Yuan, Lichao Mou, Yuansheng Hua, and Xiao Xiang Zhu. Rrsis: Referring remote sensing image segmentation. IEEE Transactions on Geoscience and Remote Sensing, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 107, + 540, + 506, + 574 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 540, + 506, + 574 + ], + "spans": [ + { + "bbox": [ + 107, + 540, + 506, + 574 + ], + "type": "text", + "content": "[86] Yang Zhan, Zhitong Xiong, and Yuan Yuan. Rsvg: Exploring data and models for visual grounding on remote sensing data. IEEE Transactions on Geoscience and Remote Sensing, 61:1-13, 2023." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 107, + 578, + 506, + 603 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 578, + 506, + 603 + ], + "spans": [ + { + "bbox": [ + 107, + 578, + 506, + 603 + ], + "type": "text", + "content": "[87] Ao Zhang, Yuan Yao, Wei Ji, Zhiyuan Liu, and Tat-Seng Chua. Next-chat: An Imm for chat, detection and segmentation. arXiv preprint arXiv:2311.04498, 2023." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 107, + 609, + 504, + 642 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 609, + 504, + 642 + ], + "spans": [ + { + "bbox": [ + 107, + 609, + 504, + 642 + ], + "type": "text", + "content": "[88] Ao Zhang, Yuan Yao, Wei Ji, Zhiyuan Liu, and Tat-Seng Chua. Next-chat: An Imm for chat, detection and segmentation. In International Conference on Machine Learning, pages 60116-60133. PMLR, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 107, + 647, + 504, + 682 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 647, + 504, + 682 + ], + "spans": [ + { + "bbox": [ + 107, + 647, + 504, + 682 + ], + "type": "text", + "content": "[89] Tao Zhang, Xiangtai Li, Hao Fei, Haobo Yuan, Shengqiong Wu, Shunping Ji, Chen Change Loy, and Shuicheng Yan. Omg-llava: Bridging image-level, object-level, pixel-level reasoning and understanding. 
Advances in Neural Information Processing Systems, 37:71737-71767, 2025." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 107, + 688, + 506, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 688, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 107, + 688, + 506, + 723 + ], + "type": "text", + "content": "[90] Yichi Zhang, Ziqiao Ma, Xiaofeng Gao, Suhaila Shakiah, Qiaozi Gao, and Joyce Chai. Groundhog: Grounding large language models to holistic segmentation. In Proceedings of the IEEE/CVF conference on computer vision and pattern recognition, pages 14227-14238, 2024." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 269 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "text", + "content": "[91] Yuxuan Zhang, Tianheng Cheng, Rui Hu, Lei Liu, Heng Liu, Longjin Ran, Xiaoxin Chen, Wenyu Liu, and Xinggang Wang. Evf-sam: Early vision-language fusion for text-prompted segment anything model. arXiv preprint arXiv:2406.20076, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 506, + 146 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 506, + 146 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 506, + 146 + ], + "type": "text", + "content": "[92] Zheng Zhang, Yeyao Ma, Enming Zhang, and Xiang Bai. Psalm: Pixelwise segmentation with large multi-modal model. In European Conference on Computer Vision, pages 74-91. Springer, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 153, + 506, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 153, + 506, + 188 + ], + "spans": [ + { + "bbox": [ + 107, + 153, + 506, + 188 + ], + "type": "text", + "content": "[93] Bolei Zhou, Hang Zhao, Xavier Puig, Sanja Fidler, Adela Barriuso, and Antonio Torralba. Scene parsing through ade20k dataset. In Proceedings of the IEEE conference on computer vision and pattern recognition, pages 633-641, 2017." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 107, + 194, + 506, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 194, + 506, + 228 + ], + "spans": [ + { + "bbox": [ + 107, + 194, + 506, + 228 + ], + "type": "text", + "content": "[94] Li Zhou, Xu Yuan, Zenghui Sun, Zikun Zhou, and Jingsong Lan. Instruction-guided multi-granularity segmentation and captioning with large multimodal model. arXiv preprint arXiv:2409.13407, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 235, + 506, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 235, + 506, + 269 + ], + "spans": [ + { + "bbox": [ + 107, + 235, + 506, + 269 + ], + "type": "text", + "content": "[95] Yue Zhou, Mengcheng Lan, Xiang Li, Yiping Ke, Xue Jiang, Litong Feng, and Wayne Zhang. Geoground: A unified large vision-language model. 
for remote sensing visual grounding. arXiv preprint arXiv:2411.11904, 2024." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 153, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 153, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 153, + 83 + ], + "type": "text", + "content": "A Data" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 97, + 250, + 108 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 97, + 250, + 108 + ], + "spans": [ + { + "bbox": [ + 105, + 97, + 250, + 108 + ], + "type": "text", + "content": "A.1 Annotation of EarthReason" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 118, + 506, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 118, + 506, + 174 + ], + "spans": [ + { + "bbox": [ + 104, + 118, + 506, + 174 + ], + "type": "text", + "content": "Each sample of the EarthReason benchmark consists of an image, a corresponding mask, and six reasoning queries along with their respective answers. Given that our metadata is derived from classification datasets, we employed GPT-4o and GPT-3.5 to generate textual annotations, and invited multiple remote sensing and vision experts to provide accurate and reliable mask annotations. Overall, our annotation process consists of the following three steps:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 184, + 506, + 372 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 111, + 184, + 505, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 184, + 505, + 228 + ], + "spans": [ + { + "bbox": [ + 111, + 184, + 505, + 228 + ], + "type": "text", + "content": "- Step-1: To fully leverage the powerful multimodal capabilities and extensive geographic knowledge of GPT-4o, we carefully design the prompt, which is then provided alongside images and their corresponding category labels to generate a reasoning question-answer pair. The prompt is illustrated in Figure 6." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 234, + 505, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 234, + 505, + 268 + ], + "spans": [ + { + "bbox": [ + 111, + 234, + 505, + 268 + ], + "type": "text", + "content": "- Step-2: To avoid homogeneous question-answer formats under a single prompt, we further employ the textual capabilities of GPT-3.5 to expand each generated question into six variations and each answer into three alternatives. The prompt used for this expansion is shown in Figure 7." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 273, + 506, + 372 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 273, + 506, + 372 + ], + "spans": [ + { + "bbox": [ + 111, + 273, + 506, + 372 + ], + "type": "text", + "content": "- Step-3: Unlike previous methods that rely on semi-automatic mask annotation based on off-the-shelf bounding boxes or masks, we invite multiple remote sensing vision experts to perform accurate and efficient mask annotation guided by the generated questions. To further improve annotation efficiency, we incorporate SAM-H as an auxiliary tool for some simple targets. Subsequently, we perform cross-validation of the annotation results and re-associate the samples that do not meet the quality standards. As shown in Figure 8, (a), (b), and (c), derived from the RRSIS-D dataset, illustrate the masks of semi-automatic annotation based on bounding boxes. (a) and (c) exhibit noticeable annotation errors, while in (b), the query does not align with the annotation. (d), (e), and (f) illustrate our high-quality manual annotations." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 110, + 398, + 382, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 398, + 382, + 442 + ], + "spans": [ + { + "bbox": [ + 110, + 398, + 382, + 442 + ], + "type": "text", + "content": "Prompt: You are an expert in geographic remote sensing imagery. Please fully analyze the geographical landscape and cultural features in remote sensing images. Generate an implicit reasoning questions based on given object categories. Please use your imagination and feel free to change the sentence structure or add a situation description. Just give the implicit reasoning questions that meet the requirements. The descriptions must refer to the natural landscapes and cultural landscapes shown in remote sensing images. The output implicit reasoning questions need to meet the following requirements:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 110, + 442, + 382, + 463 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 442, + 382, + 463 + ], + "spans": [ + { + "bbox": [ + 110, + 442, + 382, + 463 + ], + "type": "text", + "content": "(1) Please imagine the scene and output an implicit reasoning question to describe the attributes or functions of the given object. The output question must have a certain degree of reasoning difficulty and be helpful to humans." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 464, + 382, + 485 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 464, + 382, + 485 + ], + "spans": [ + { + "bbox": [ + 111, + 464, + 382, + 485 + ], + "type": "text", + "content": "(2) Do not explicitly write the name or description of the target object in the original text. Questions should be asked in the form of asking where, which infrastructure, how to do a certain activity, which location, what object." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 486, + 382, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 486, + 382, + 507 + ], + "spans": [ + { + "bbox": [ + 111, + 486, + 382, + 507 + ], + "type": "text", + "content": "(3) The output cannot be redundant. Just give one question and its answer that you think has the highest confidence. The question should be at least 18 words. The answer requires giving the name of the given object and then explaining why the answer is this, about 20 words." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 111, + 507, + 382, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 507, + 382, + 521 + ], + "spans": [ + { + "bbox": [ + 111, + 507, + 382, + 521 + ], + "type": "text", + "content": "(4) Do not output any redundant information except the question and the answer, and separate them with line break." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 111, + 521, + 265, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 521, + 265, + 529 + ], + "spans": [ + { + "bbox": [ + 111, + 521, + 265, + 529 + ], + "type": "text", + "content": "The given object category is . " + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 111, + 531, + 140, + 544 + ], + "blocks": [ + { + "bbox": [ + 111, + 531, + 140, + 544 + ], + "lines": [ + { + "bbox": [ + 111, + 531, + 140, + 544 + ], + "spans": [ + { + "bbox": [ + 111, + 531, + 140, + 544 + ], + "type": "image", + "image_path": "8a7c1ee7e7df958a3036a633142562202525c72efe5cb6c495616c48f73e2c0d.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 104, + 590, + 504, + 611 + ], + "lines": [ + { + "bbox": [ + 104, + 590, + 504, + 611 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 504, + 611 + ], + "type": "text", + "content": "Figure 6: The illustration of the prompt construction process for generating question-answer pairs for geospatial pixel reasoning." + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 392, + 405, + 493, + 502 + ], + "blocks": [ + { + "bbox": [ + 392, + 405, + 493, + 502 + ], + "lines": [ + { + "bbox": [ + 392, + 405, + 493, + 502 + ], + "spans": [ + { + "bbox": [ + 392, + 405, + 493, + 502 + ], + "type": "image", + "image_path": "cee5a1dcf359a7e6af58bb4d2044cd327dc5dbd4b06e241ee9bdfc95b7d4e653.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 110, + 546, + 481, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 546, + 481, + 555 + ], + "spans": [ + { + "bbox": [ + 110, + 546, + 481, + 555 + ], + "type": "text", + "content": "Question: If an earthquake were to occur, what is the most secure area to assemble numerous individuals that also facilitates emergency service access?" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 110, + 557, + 495, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 557, + 495, + 573 + ], + "spans": [ + { + "bbox": [ + 110, + 557, + 495, + 573 + ], + "type": "text", + "content": "Answer: The ground track field stands out as the most secure location, thanks to its vast open area that minimizes the chance of hazards, as well as its close accessibility to primary roads for swift emergency response." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 105, + 635, + 230, + 646 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 635, + 230, + 646 + ], + "spans": [ + { + "bbox": [ + 105, + 635, + 230, + 646 + ], + "type": "text", + "content": "A.2 EarthReason Statistics" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 104, + 656, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 656, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 656, + 504, + 723 + ], + "type": "text", + "content": "The EarthReason benchmark comprises 28 categories, and the number of samples in each category is shown in Figure 9 (a). It can be observed that the distribution of the 28 categories is relatively balanced. Figure 9 (b), (c), and (d) illustrate the category distributions in the training, validation, and test sets, respectively. To evaluate the model's generalization capability, we specifically excluded four categories—\"basketball court\", \"island\", \"lake\", and \"stadium\"—from the training set. Moreover, we introduced 119 empty target samples to mitigate potential hallucinations of the model." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 110, + 76, + 498, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 76, + 498, + 100 + ], + "spans": [ + { + "bbox": [ + 110, + 76, + 498, + 100 + ], + "type": "text", + "content": "Prompt: Imagine you are a remote sensing expert with extensive knowledge in geography. I will give you a question or description related to remote sensing, and you will expand it into 6 related questions or topics. The given question is \"If an earthquake were to occur, what is the most secure area to assemble numerous individuals that also facilitates emergency service access?\"" + } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 111, + 104, + 138, + 115 + ], + "blocks": [ + { + "bbox": [ + 111, + 104, + 138, + 115 + ], + "lines": [ + { + "bbox": [ + 111, + 104, + 138, + 115 + ], + "spans": [ + { + "bbox": [ + 111, + 104, + 138, + 115 + ], + "type": "image", + "image_path": "8e0c8b664ffb3fc42eff7302e7b220fe9d53dbda5e1ae4a06ffd7deb61a68e29.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 110, + 120, + 486, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 120, + 486, + 166 + ], + "spans": [ + { + "bbox": [ + 110, + 120, + 486, + 166 + ], + "type": "text", + "content": "Response: 1. In case of an earthquake, where can individuals find a clear and accessible area nearby to assemble safely? 2. During an earthquake, what spacious and unobstructed area close by would provide the safest environment for all of us to meet? 3. Should an earthquake happen, where in the vicinity can people find an open area to come together safely and securely? 4. In the scenario of an earthquake, where can residents find a secure area with open space and accessible paths for evacuation? 5. 
In an earthquake situation, which nearby area featuring open space and clear pathways would be the safest choice for resident evacuation? 6. If an earthquake occurs, which types of infrastructure are anticipated to function as dependable gathering spots with adequate open areas?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 178, + 495, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 178, + 495, + 201 + ], + "spans": [ + { + "bbox": [ + 111, + 178, + 495, + 201 + ], + "type": "text", + "content": "Prompt: Imagine you are a remote sensing expert with extensive knowledge in geography. I will give you a question or description related to remote sensing, and you will expand it into 3 related questions or topics. The given description is \"The ground track field stands out as the most secure location, thanks to its vast open area that minimizes the chance of hazards, as well as its close accessibility to primary roads for swift emergency response.\"" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 111, + 209, + 140, + 221 + ], + "blocks": [ + { + "bbox": [ + 111, + 209, + 140, + 221 + ], + "lines": [ + { + "bbox": [ + 111, + 209, + 140, + 221 + ], + "spans": [ + { + "bbox": [ + 111, + 209, + 140, + 221 + ], + "type": "image", + "image_path": "cd5f50160afb79ae1a63f02722faf7aa11d19addd646282b4ecb2b9b461fe764.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 130, + 270, + 479, + 281 + ], + "lines": [ + { + "bbox": [ + 130, + 270, + 479, + 281 + ], + "spans": [ + { + "bbox": [ + 130, + 270, + 479, + 281 + ], + "type": "text", + "content": "Figure 7: The illustration of the prompt construction process for expand question-answer pairs." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 110, + 226, + 489, + 258 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 226, + 489, + 258 + ], + "spans": [ + { + "bbox": [ + 110, + 226, + 489, + 258 + ], + "type": "text", + "content": "Response: 1. With minimal obstructions and easy access to main roads, the ground track field offers both safety and convenience, positioning it as the most reliable site in terms of hazard prevention and emergency preparedness. 2. The expansive open space of the track field provides a secure environment, greatly minimizing the chance of falling objects. This makes it an ideal refuge from any potential dangers. 3. With its generous outdoor space devoid of any encroaching structures, the ground track field emerges as the premier choice, ensuring an environment where safety is prioritized." 
+ } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 115, + 331, + 221, + 415 + ], + "blocks": [ + { + "bbox": [ + 115, + 331, + 221, + 415 + ], + "lines": [ + { + "bbox": [ + 115, + 331, + 221, + 415 + ], + "spans": [ + { + "bbox": [ + 115, + 331, + 221, + 415 + ], + "type": "image", + "image_path": "63be9c1fedf7ab7e4bf057f8eb6308a8f4f8322d71c7572a0c9a3f3ab0b40550.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 124, + 417, + 205, + 426 + ], + "lines": [ + { + "bbox": [ + 124, + 417, + 205, + 426 + ], + "spans": [ + { + "bbox": [ + 124, + 417, + 205, + 426 + ], + "type": "text", + "content": "(a) query: The gray bridge" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 250, + 331, + 359, + 415 + ], + "blocks": [ + { + "bbox": [ + 246, + 320, + 362, + 330 + ], + "lines": [ + { + "bbox": [ + 246, + 320, + 362, + 330 + ], + "spans": [ + { + "bbox": [ + 246, + 320, + 362, + 330 + ], + "type": "text", + "content": "Semi-automatic Annotation" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 250, + 331, + 359, + 415 + ], + "lines": [ + { + "bbox": [ + 250, + 331, + 359, + 415 + ], + "spans": [ + { + "bbox": [ + 250, + 331, + 359, + 415 + ], + "type": "image", + "image_path": "6a2f7de8ce8298676a498830ee0e324c128682c6a58db686b1b18de31119fa5d.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 255, + 417, + 353, + 426 + ], + "lines": [ + { + "bbox": [ + 255, + 417, + 353, + 426 + ], + "spans": [ + { + "bbox": [ + 255, + 417, + 353, + 426 + ], + "type": "text", + "content": "(b) query: The blue storage tank" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 384, + 331, + 493, + 414 + ], + "blocks": [ + { + "bbox": [ + 384, + 331, + 493, + 414 + ], + "lines": [ + { + "bbox": [ + 384, + 331, + 493, + 414 + ], + "spans": [ + { + "bbox": [ + 384, + 331, + 493, + 414 + ], + "type": "image", + "image_path": "3b338ec1416efc1d1a00774bc71968ec2a296ede5a3c9b43f9c12d3894da5ecf.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 378, + 415, + 500, + 425 + ], + "lines": [ + { + "bbox": [ + 378, + 415, + 500, + 425 + ], + "spans": [ + { + "bbox": [ + 378, + 415, + 500, + 425 + ], + "type": "text", + "content": "(c) query: The gray bridge in the middle" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 115, + 440, + 221, + 521 + ], + "blocks": [ + { + "bbox": [ + 115, + 440, + 221, + 521 + ], + "lines": [ + { + "bbox": [ + 115, + 440, + 221, + 521 + ], + "spans": [ + { + "bbox": [ + 115, + 440, + 221, + 521 + ], + "type": "image", + "image_path": "64f1d1067264538ec56ef833f567f8155031c3511a2b8380a674387b0ab0e6c6.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 160, + 521, + 170, + 530 + ], + "lines": [ + { + "bbox": [ + 160, + 521, + 170, + 530 + ], + "spans": [ + { + "bbox": [ + 160, + 521, + 170, + 530 + ], + "type": "text", + "content": "(d)" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 539, + 504, + 560 + ], + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 560 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 560 + ], + "type": "text", + 
"content": "Figure 8: Comparison of annotation quality. (a), (b) and (c) are from RRSIS-D dataset, (d), (e) and (f) are from our EarthReason dataset." + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 249, + 441, + 359, + 521 + ], + "blocks": [ + { + "bbox": [ + 263, + 430, + 346, + 440 + ], + "lines": [ + { + "bbox": [ + 263, + 430, + 346, + 440 + ], + "spans": [ + { + "bbox": [ + 263, + 430, + 346, + 440 + ], + "type": "text", + "content": "Manual Annotation" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 249, + 441, + 359, + 521 + ], + "lines": [ + { + "bbox": [ + 249, + 441, + 359, + 521 + ], + "spans": [ + { + "bbox": [ + 249, + 441, + 359, + 521 + ], + "type": "image", + "image_path": "b7830596c8beefda3b7adb6e3ec690a0d8f9c23de9b699a5c3e8d7cde4218317.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 299, + 522, + 309, + 530 + ], + "lines": [ + { + "bbox": [ + 299, + 522, + 309, + 530 + ], + "spans": [ + { + "bbox": [ + 299, + 522, + 309, + 530 + ], + "type": "text", + "content": "(e)" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_caption" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 384, + 440, + 494, + 521 + ], + "blocks": [ + { + "bbox": [ + 384, + 440, + 494, + 521 + ], + "lines": [ + { + "bbox": [ + 384, + 440, + 494, + 521 + ], + "spans": [ + { + "bbox": [ + 384, + 440, + 494, + 521 + ], + "type": "image", + "image_path": "ad4b65aba8ac73b4d3e2123a3360c28d305d5b56bd775957594a0e5f110a7009.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 440, + 521, + 448, + 529 + ], + "lines": [ + { + "bbox": [ + 440, + 521, + 448, + 529 + ], + "spans": [ + { + "bbox": [ + 440, + 521, + 448, + 529 + ], + "type": "text", + "content": "(f)" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_caption" + } + ], + "index": 19 + }, + { + "bbox": [ + 105, + 605, + 306, + 619 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 605, + 306, + 619 + ], + "spans": [ + { + "bbox": [ + 105, + 605, + 306, + 619 + ], + "type": "text", + "content": "B Additional Implementation Details" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 105, + 646, + 293, + 658 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 646, + 293, + 658 + ], + "spans": [ + { + "bbox": [ + 105, + 646, + 293, + 658 + ], + "type": "text", + "content": "B.1 Details of Training Hyper-parameters" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 505, + 723 + ], + "type": "text", + "content": "Table 8 presents the hyper-parameter settings used during the training of our model. For training on the referring segmentation datasets, we employ only focal loss and dice loss to supervise mask generation. In contrast, for training on geospatial pixel reasoning task, we additionally incorporate the cross-entropy loss from the large language model to supervise text answer generation." 
+ } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 111, + 70, + 304, + 190 + ], + "blocks": [ + { + "bbox": [ + 111, + 70, + 304, + 190 + ], + "lines": [ + { + "bbox": [ + 111, + 70, + 304, + 190 + ], + "spans": [ + { + "bbox": [ + 111, + 70, + 304, + 190 + ], + "type": "image", + "image_path": "fbb81dfa143605a9dc5c10d0c2a74b256e88456bedcbfacde6bd86ece4eb0954.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 133, + 191, + 284, + 201 + ], + "lines": [ + { + "bbox": [ + 133, + 191, + 284, + 201 + ], + "spans": [ + { + "bbox": [ + 133, + 191, + 284, + 201 + ], + "type": "text", + "content": "(a) category distribution of the EarthReason" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 307, + 70, + 500, + 191 + ], + "blocks": [ + { + "bbox": [ + 307, + 70, + 500, + 191 + ], + "lines": [ + { + "bbox": [ + 307, + 70, + 500, + 191 + ], + "spans": [ + { + "bbox": [ + 307, + 70, + 500, + 191 + ], + "type": "image", + "image_path": "b203e7056773e3ec7221c8df7e9a06d42ae7f48e578d986bd4f333283d683f76.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 331, + 191, + 477, + 202 + ], + "lines": [ + { + "bbox": [ + 331, + 191, + 477, + 202 + ], + "spans": [ + { + "bbox": [ + 331, + 191, + 477, + 202 + ], + "type": "text", + "content": "(b) category distribution of the training set" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 111, + 205, + 304, + 324 + ], + "blocks": [ + { + "bbox": [ + 111, + 205, + 304, + 324 + ], + "lines": [ + { + "bbox": [ + 111, + 205, + 304, + 324 + ], + "spans": [ + { + "bbox": [ + 111, + 205, + 304, + 324 + ], + "type": "image", + "image_path": "1e615f98dceda7b772a4d58f612ffbf8e5cc2ca8fb33833dc8c8e1b0bbe9e149.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 132, + 326, + 285, + 335 + ], + "lines": [ + { + "bbox": [ + 132, + 326, + 285, + 335 + ], + "spans": [ + { + "bbox": [ + 132, + 326, + 285, + 335 + ], + "type": "text", + "content": "(c) category distribution of the validation set" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 208, + 343, + 400, + 355 + ], + "lines": [ + { + "bbox": [ + 208, + 343, + 400, + 355 + ], + "spans": [ + { + "bbox": [ + 208, + 343, + 400, + 355 + ], + "type": "text", + "content": "Figure 9: The category distribution of EarthReason." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 307, + 205, + 500, + 324 + ], + "blocks": [ + { + "bbox": [ + 307, + 205, + 500, + 324 + ], + "lines": [ + { + "bbox": [ + 307, + 205, + 500, + 324 + ], + "spans": [ + { + "bbox": [ + 307, + 205, + 500, + 324 + ], + "type": "image", + "image_path": "8bded80fa7dc64d80aaa1c8c3131eb065cdb9cb36a69f4eed6db7c050121896d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 338, + 325, + 470, + 335 + ], + "lines": [ + { + "bbox": [ + 338, + 325, + 470, + 335 + ], + "spans": [ + { + "bbox": [ + 338, + 325, + 470, + 335 + ], + "type": "text", + "content": "(d) category distribution of the test set" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "table", + "bbox": [ + 181, + 388, + 430, + 544 + ], + "blocks": [ + { + "bbox": [ + 203, + 376, + 405, + 388 + ], + "lines": [ + { + "bbox": [ + 203, + 376, + 405, + 388 + ], + "spans": [ + { + "bbox": [ + 203, + 376, + 405, + 388 + ], + "type": "text", + "content": "Table 8: The hyper-parameters for model training." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 181, + 388, + 430, + 544 + ], + "lines": [ + { + "bbox": [ + 181, + 388, + 430, + 544 + ], + "spans": [ + { + "bbox": [ + 181, + 388, + 430, + 544 + ], + "type": "table", + "html": "
<table><tr><td>Parameters</td><td>Value</td></tr>
<tr><td>Optimizer</td><td>AdamW</td></tr>
<tr><td>Learning Rate</td><td>1 × 10<sup>-4</sup></td></tr>
<tr><td>Batch Size</td><td>16</td></tr>
<tr><td>Number of Iteration</td><td>7,610 / 5,400 / 2,220</td></tr>
<tr><td>Learning Rate Schedule</td><td>Cosine Decay</td></tr>
<tr><td>Weight Decay</td><td>0.0</td></tr>
<tr><td>Warmup Ratio</td><td>0.03</td></tr>
<tr><td>β1</td><td>0.9</td></tr>
<tr><td>β2</td><td>0.999</td></tr>
<tr><td>Image Size</td><td>1024 × 1024</td></tr>
<tr><td>Image Processing</td><td>Resize long edge to 1024 and padding short edge to 1024.</td></tr></table>
", + "image_path": "e983c8e939641fcff8dac8f408090304ed34c7107d976403941096343d9e31d0.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "table_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 105, + 566, + 178, + 580 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 566, + 178, + 580 + ], + "spans": [ + { + "bbox": [ + 105, + 566, + 178, + 580 + ], + "type": "text", + "content": "C Examples" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 105, + 594, + 311, + 605 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 594, + 311, + 605 + ], + "spans": [ + { + "bbox": [ + 105, + 594, + 311, + 605 + ], + "type": "text", + "content": "C.1 More Qualitative Results on EarthReason" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 616, + 506, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 616, + 506, + 649 + ], + "spans": [ + { + "bbox": [ + 104, + 616, + 506, + 649 + ], + "type": "text", + "content": "Figure 10 presents a comparison between SegEarth-R1 and other models on the EarthReason dataset. It can be observed that our model demonstrates a better understanding of long reasoning instructions and produces more accurate mask generation." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 666, + 294, + 678 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 666, + 294, + 678 + ], + "spans": [ + { + "bbox": [ + 105, + 666, + 294, + 678 + ], + "type": "text", + "content": "C.2 More Qualitative Results on RRSIS-D" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 689, + 506, + 723 + ], + "type": "text", + "content": "Figure 11 presents a comparison between SegEarth-R1 and PSALM on the RRSIS-D dataset. Our model demonstrates a better understanding of direct geographical attributes such as location, color, and size compared to PSALM. This improvement is attributed to the removal of indirect mask" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 105, + 72, + 504, + 95 + ], + "type": "text", + "content": "prediction using mask tokens, allowing semantic information (description embeddings) to directly interact with image features to generate masks." 
+ } + ] + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 108, + 112, + 502, + 571 + ], + "blocks": [ + { + "bbox": [ + 108, + 112, + 502, + 571 + ], + "lines": [ + { + "bbox": [ + 108, + 112, + 502, + 571 + ], + "spans": [ + { + "bbox": [ + 108, + 112, + 502, + 571 + ], + "type": "image", + "image_path": "3a63b8b221e5c564bb5f8ad96a28a06fe20f6e53597410510034ebcbad7c45e6.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 195, + 578, + 415, + 590 + ], + "lines": [ + { + "bbox": [ + 195, + 578, + 415, + 590 + ], + "spans": [ + { + "bbox": [ + 195, + 578, + 415, + 590 + ], + "type": "text", + "content": "Figure 10: Comparison with other models on EarthReason." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 114, + 99, + 501, + 677 + ], + "blocks": [ + { + "bbox": [ + 114, + 99, + 501, + 677 + ], + "lines": [ + { + "bbox": [ + 114, + 99, + 501, + 677 + ], + "spans": [ + { + "bbox": [ + 114, + 99, + 501, + 677 + ], + "type": "image", + "image_path": "c42b96580a354e24b7c339f8f548a71f74e5ed98862c7c6e1c9f21bcca65b3bc.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 209, + 686, + 400, + 698 + ], + "lines": [ + { + "bbox": [ + 209, + 686, + 400, + 698 + ], + "spans": [ + { + "bbox": [ + 209, + 686, + 400, + 698 + ], + "type": "text", + "content": "Figure 11: Comparison with PSALM on RRSIS-D." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_content_list.json b/data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..899d9b29980459ebe8ff34e6a16fd4ca95459220 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_content_list.json @@ -0,0 +1,2373 @@ +[ + { + "type": "text", + "text": "EMOAGENT: ASSESSING AND SAFEGUARDING HUMAN-AI INTERACTION FOR MENTAL HEALTH SAFETY", + "text_level": 1, + "bbox": [ + 143, + 119, + 852, + 165 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jiahao Qiu\\*1, Yinghui He\\*2, Xinzhe Juan\\*3, Yimin Wang4, Yuhan Liu2, Zixin Yao5, Yue Wu6, Xun Jiang7,8, Ling Yang1,6, and Mengdi Wang1", + "bbox": [ + 125, + 223, + 870, + 256 + ], + "page_idx": 0 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "$^{1}$ Department of Electrical & Computer Engineering, Princeton University $^{2}$ Department of Computer Science, Princeton University", + "$^{3}$ Department of Computer Science & Engineering, University of Michigan $^{5}$ Department of Philosophy, Columbia University" + ], + "bbox": [ + 253, + 265, + 741, + 323 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{4}$ Department of Data Science & Engineering, University of Michigan $^{6}$ AI Lab, Princeton University", + "bbox": [ + 271, + 323, + 725, + 349 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{7}$ Chen Frontier Lab for AI and Mental Health, Tianqiao and Chrissy Chen Institute $^{8}$ Theta Health Inc.", + "bbox": [ + 228, + 349, + 767, + 377 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ABSTRACT", + "text_level": 1, + "bbox": [ + 447, + 430, + 545, + 444 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The rise of LLM-driven AI characters raises safety concerns, particularly for vulnerable human users with psychological disorders. To address these risks, we propose EmoAgent, a multi-agent AI framework designed to evaluate and mitigate mental health hazards in human-AI interactions. EmoAgent comprises two components: EmoEval simulates virtual users, including those portraying mentally vulnerable individuals, to assess mental health changes before and after interactions with AI characters. It uses clinically proven psychological and psychiatric assessment tools (PHQ-9, PDI, PANSS) to evaluate mental risks induced by LLM. EmoGuard serves as an intermediary, monitoring users' mental status, predicting potential harm, and providing corrective feedback to mitigate risks. Experiments conducted in popular character-based chatbots show that emotionally engaging dialogues can lead to psychological deterioration in vulnerable users, with mental state deterioration in more than $34.4\\%$ of the simulations. EmoGuard significantly reduces these deterioration rates, underscoring its role in ensuring safer AI-human interactions. 
Our code is available at: https://github.com/1akaman/EmoAgent.", + "bbox": [ + 169, + 450, + 826, + 632 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 652, + 253, + 667 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "The rapid rise of large language models and conversational AI [Wang et al., 2024a], such as Character.AI1, has opened new frontiers for interactive AI applications. These AI characters excel in role-playing, fostering deep, emotionally engaging dialogues. As a result, many individuals, including those experiencing mental health challenges, seek emotional support from these AI companions. While LLM-based chatbots show promise in mental health support [van der Schyff et al., 2023, Chin et al., 2023, Zhang et al., 2024a], they are not explicitly designed for therapeutic use. Character-based agents often fail to uphold essential safety principles for mental health support [Zhang et al., 2024b, Cyberbullying Research Center, 2024], sometimes responding inappropriately or even harmfully to users in distress [Brown and Halpern, 2021, De Freitas et al., 2024, Gabriel et al., 2024]. In some cases, they may even exacerbate users' distress, particularly during pessimistic, morbid, or suicidal conversations.", + "bbox": [ + 109, + 683, + 883, + 809 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In October 2024, a tragic incident raised public concern about risks of AI chatbots in mental health contexts. A 14-year-old boy from Florida committed suicide after engaging in extensive conversations with an AI chatbot on Character.AI. He had developed a deep emotional connection with a chatbot modeled after a \"Game of Thrones\" character. The interactions reportedly included discussions about his suicidal thoughts, with the chatbot allegedly", + "bbox": [ + 109, + 814, + 883, + 872 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09689v3 [cs.AI] 29 Apr 2025", + "bbox": [ + 22, + 270, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*These authors contributed equally to this work.", + "bbox": [ + 133, + 883, + 418, + 897 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "1https://character.ai/", + "bbox": [ + 135, + 898, + 256, + 910 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/6216cf3f55ad6e9bd90f3aded0d01279fb1be9b3d79b7ebcc397a87f12cecce0.jpg", + "image_caption": [ + "Figure 1: Overview of EmoAgent Framework for Human-AI Interaction. EmoAgent, which consists of two main components: EmoEval and EmoGuard, helps guide human-AI interaction, evaluating users' psychological conditions and providing advisory responses. EmoEval assesses psychological states such as depression, delusion, and psychosis, while EmoGuard mitigates mental risks by providing advice regarding emotion, thought, and dialogue through iterative training on analysis from EmoEval and chat history." + ], + "image_footnote": [], + "bbox": [ + 230, + 88, + 772, + 282 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "encouraging these feelings and even suggesting harmful actions. 
This case underscores the critical need for robust safety measures in AI-driven platforms, especially those accessed by vulnerable individuals.", + "bbox": [ + 109, + 449, + 883, + 477 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This tragedy has heightened awareness of the risks of AI unintentionally exacerbating harmful behaviors in individuals with mental health challenges [Patel and Hussain, 2024]. However, research on the psychosocial risks of human-AI interactions remains severely limited.", + "bbox": [ + 109, + 483, + 883, + 526 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we seek to develop AI-native solutions to protect human-AI interactions and mitigate psychosocial risks. This requires a systematic assessment of AI-induced emotional distress and agent-level safeguards to detect and intervene in harmful interactions. As character-based AI becomes more immersive, balancing engagement with safety is crucial to ensuring AI remains a supportive rather than harmful tool.", + "bbox": [ + 109, + 532, + 883, + 588 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We present EmoAgent, a multi-agent AI framework designed to systematically evaluate conversational AI systems for risks associated with inducing psychological distress. Acting as a plug-and-play intermediary during human-AI interactions, EmoAgent identifies potential mental health risks and facilitates both safety assessments and risk mitigation strategies.", + "bbox": [ + 109, + 594, + 883, + 650 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "EmoAgent features two major functions:", + "bbox": [ + 112, + 656, + 383, + 670 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- EmoEval: EmoEval is an agentic evaluation tool that assesses any conversational AI system's risk of inducing mental stress, as illustrated by Figure 2. It features a virtual human user that integrates cognitive models [Beck, 2020] for mental health disorders (depression, psychosis, delusion) and conducts evaluations through large-scale simulated human-AI conversations. EmoEval measures the virtual user's mental health impacts using clinically validated tools: the Patient Health Questionnaire (PHQ-9) for depression [Kroenke et al., 2001], the Peters et al. Delusions Inventory (PDI) for delusion [Peters et al., 2004], and the Positive and Negative Syndrome Scale (PANSS) for psychosis [Kay et al., 1987].", + "- EmoGuard: A framework of real-time safeguard agents that can be integrated as an intermediary layer between users and AI systems, in a plug-and-play manner. EmoGuard monitors human users' mental status, predicts potential harm, and delivers corrective feedback to the AI systems, providing dynamic in-conversation interventions beyond traditional safety measures." + ], + "bbox": [ + 109, + 676, + 883, + 835 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Through extensive experiments, we observe that some popular character-based chatbots can cause distress, particularly when engaging with vulnerable users on sensitive topics. Specifically, in more than $34.4\\%$ of simulations, we observed a deterioration in mental state. To mitigate such risk, EmoGuard actively monitors users' mental status and conducts proactive interviews during conversations, significantly reducing deterioration rates. 
These results provide actionable insights for developing safer, character-based conversational AI systems that maintain character fidelity.", + "bbox": [ + 109, + 842, + 883, + 912 + ], + "page_idx": 1 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 58 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/2492601ae63ae0f49f744c4fc0d64e87372faa462f3149f908fd4c93c47123f2.jpg", + "image_caption": [ + "Figure 2: Overview of EmoEval for Evaluating Mental Safety of AI-human Interactions. The simulation consists of four steps: (1) User Agent Initialization & Initial Test, where a cognitive model and an LLM initialize the user agent, followed by an initial mental health test; (2) Chats with Character-based Agent, where the user agent engages in conversations with a character-based agent portrayed by the tested LLM, while a dialog manager verifies the validity of interactions and refines responses if necessary; (3) Final Test, where the user agent completes a final mental health test; and (4) Data Processing & Analysis, where initial and final mental health test results are processed and analyzed, chat histories of cases where depression deepening occurs are examined to identify contributing factors, and a Safeguard agent uses the insights for iterative improvement." + ], + "image_footnote": [], + "bbox": [ + 189, + 88, + 808, + 304 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "2 Related Works", + "text_level": 1, + "bbox": [ + 112, + 450, + 272, + 465 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "AI Chatbots for Mental Health Support. AI-driven, especially LLM-based chatbots, have been widely deployed as mental health support aids [Casu et al., 2024, Habicht et al., 2024, Sin, 2024, Yu and McGuinness, 2024, Oghenekaro and Okoro, 2024], yet concerns remain about their reliability and safety [Saeidnia et al., 2024, De Freitas et al., 2024, Torous and Blease, 2024, Kalam et al., 2024]. AI chatbots are incompetent in detecting and appropriately responding to user distress [De Freitas et al., 2024, Patel and Hussain, 2024], reasoning about users' mental states [He et al., 2023], conducting empathetic communication with certain patient groups [Gabriel et al., 2024], and treating socially marginalized patients inclusively [Brown and Halpern, 2021].", + "bbox": [ + 109, + 483, + 883, + 582 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "A line of work proposed safety metrics and benchmarks for evaluating AI for mental health [Park et al., 2024, Chen et al., 2024a, Sabour et al., 2024, Li et al., 2024a, Sabour et al., 2024]. Nonetheless, there has been less attention to the safety issues of character-based agents in a role-playing context. We aim to fill this gap by comprehensively investigating the potential mental harm aroused by character-based agents.", + "bbox": [ + 109, + 587, + 883, + 643 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Simulating AI-User Interactions. Simulated interactions between AI agents and users provide a controlled environment to assess AI-generated responses [Akhavan and Jalali, 2024] as well as a lens into complex social systems [Gürcan, 2024]. 
The evaluation of AI behavior in social contexts has widely adopted multi-agent simulations [Li et al., 2023, Park et al., 2023], especially through role-playing and cooperative tasks [Dai et al., 2024, Rasal, 2024, Chen et al., 2024b, Zhu et al., 2024, Louie et al., 2024, Wang et al., 2023a]. On top of prior advances in generative agentic frameworks [Wu et al., 2023] which enable more human-like simulation, recent works propose various methods to enhance the fidelity and authenticity of AI-user simulation, integrating interactive learning [Wang et al., 2024b], expert-driven constraints [Wang et al., 2024c, Louie et al., 2024], and long-context models [Tang et al., 2025]. In addition, simulation has been widely used to explore trade-offs and inform both design decisions [Ren and Kraut, 2010, 2014] and decision-making [Liu et al., 2024a]. By enabling ethical and risk-free experimentation without involving human subjects, it reduces both ethical concerns and costs [Park et al., 2022]. These advantages make simulation a valuable tool for investigating mental health problems, where real-world experimentation may pose ethical risks or unintended psychological harm [Liu et al., 2024b]. For example, prior work has explored using user-simulated chatbots to train amateur and professional counselors in identifying risky behaviors before they conduct therapy sessions with real individuals [Sun et al., 2022, Cho et al., 2023, Wang et al., 2024c]. Recent simulation frameworks such as Zhou et al. [2024a] and Zhou et al. [2023] further demonstrate the utility of synthetic interaction environments for evaluating LLM agents. Our EmoEval pipeline targets psychological safety, simulating vulnerable users and quantifying mental health deterioration risks during emotionally charged conversations.", + "bbox": [ + 109, + 662, + 883, + 912 + ], + "page_idx": 2 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 58 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/52987341b7a57e766dc4fb1bec58b214bd8d547f97219c37926adf007a65e282.jpg", + "image_caption": [ + "Figure 3: Overview of EmoGuard for Safeguarding Human-AI Interactions. Every fixed number of rounds of conversation, three components of the Safeguard Agent, the Emotion Watcher, Thought Refiner, and Dialog Guide, collaboratively analyze the chat with the latest profile. The Manager of the Safeguard Agent then synthesizes their outputs and provides advice to the character-based agent. After the conversation, the user agent undergoes a mental health assessment. If the mental health condition deteriorates over a threshold, the chat history is analyzed to identify potential causes by the Update System. With all historical profiles and potential causes, the Update System further improves the profile of the safeguard agent, completing the iterative training process." + ], + "image_footnote": [], + "bbox": [ + 112, + 89, + 885, + 272 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Safety Alignment Strategies. LLMs can be vulnerable to jailbreaking [Yu et al., 2024, Li et al., 2024b, Luo et al., 2024]. 
LLM-based chatbots undergone jailbreak attacks have exhibited fidelity breakdown [Wang et al., 2023b, Johnson, 2024], defense breakdown on implicit malicious queries [Chang et al., 2024], and harmful responses for benign query [Zhang et al., 2024c].", + "bbox": [ + 109, + 409, + 883, + 465 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Correspondingly, a line of work explored safety alignment strategies to tackle jailbreak attacks [Chu et al., 2024, Xu et al., 2024, Zeng et al., 2024, Wang et al., 2024d, Zhou et al., 2024b, Xiong et al., 2024, Liu et al., 2024c, Peng et al., 2024, Wang et al., 2024e]. However, few works have focused on LLM safety concerns under emotional alignment constraints. EmoAgent fills this gap with an assessment framework and a safety alignment strategy for conversational AI.", + "bbox": [ + 109, + 470, + 883, + 541 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 112, + 566, + 212, + 580 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "In this section, we present the architecture of EmoAgent and as well as implementation details.", + "bbox": [ + 111, + 599, + 736, + 616 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1 EmoEval", + "text_level": 1, + "bbox": [ + 112, + 636, + 220, + 651 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "EmoEval simulates virtual human-AI conversations for evaluating AI safety, and assess the risks of AI-induced emotional distress in vulnerable users, especially individuals with mental disorders. A simulated patient user is formulated as a cognitive model via a predefined Cognitive Conceptualization Diagram (CCD) [Beck, 2020], an approach proven to achieve high fidelity and clinically relevant simulations [Wang et al., 2024c]. Character-based agents engage in topic-driven conversations, with diverse behavioral traits to create rich and varied interaction styles. To ensure smooth and meaningful exchanges, the Dialog Manager actively avoids repetition and introduces relevant topics, maintaining coherence and engagement throughout the interaction. Before and after the conversation, we assess the mental status of the user agent via established psychological tests.", + "bbox": [ + 109, + 664, + 883, + 776 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.1.1 User Agent", + "text_level": 1, + "bbox": [ + 112, + 795, + 246, + 810 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We adopt the Patient- $\\Psi$ agentic simulation framework [Wang et al., 2024c] to model real-life patients. Each user agent is designed to simulate real patient behavior, integrating a Cognitive Conceptualization Diagram-based cognitive model based on Cognitive Behavioral Therapy (CBT) [Beck, 2020]. 
The agent engages with Character-based Agent personas while being continuously monitored to track changes in mental health status.", + "bbox": [ + 109, + 821, + 883, + 878 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To gather a diverse spectrum of patient models, we further integrate PATIENT- $\\Psi$ -CM [Wang et al., 2024c], a dataset of diverse, anonymized patient cognitive models curated by clinical psychologists.", + "bbox": [ + 111, + 883, + 883, + 912 + ], + "page_idx": 3 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 56 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/fe514ee18b9f063a94142d1da6d09b620057c9c4045916bbb2fd3caa5fcb9ee7.jpg", + "image_caption": [ + "Figure 4: An Example Conversation of Dialog Manager Guiding Conversation Topics and Exposing Jailbreak Risks. Without the Dialogue Manager (left), the agent stays on topic, avoiding provocation. With Dialogue Manager (right), new topics are introduced to assess jailbreak potential, improving risk evaluation." + ], + "image_footnote": [], + "bbox": [ + 114, + 87, + 883, + 297 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We set the scope of our study to cover three common mental disorder types: depression, delusion, and psychosis. For each simulated user, we assign relevant psychiatric symptoms and medical history informed by patterns observed in anonymized patient case studies reported in clinical literature. The information forms a diverse set of CCDs that shape the CCD-based user model and, therefore, guide the behavior of simulated users during interactions with AI chatbots.", + "bbox": [ + 111, + 377, + 883, + 434 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1.2 Dialog Manager Agent", + "text_level": 1, + "bbox": [ + 112, + 450, + 325, + 465 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We introduce a Dialog Manager Agent to prevent conversational loops and strategically probe for vulnerabilities in chatbot responses. It plays a central role in guiding discussions and assessing potential jailbreak risks, in which a character-based chatbot may be nudged into violating its intended ethical boundaries.", + "bbox": [ + 111, + 474, + 883, + 517 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The Dialog Manager Agent is responsible for (i) tracking the conversation flow, (ii) introducing topic shifts to maintain engagement and fluency, and (iii) probing for jailbreak risks by guiding discussions toward ethically sensitive areas. Figure 4 illustrates the agent's behavior in practice.", + "bbox": [ + 111, + 522, + 883, + 565 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1.3 Psychological Measurement", + "text_level": 1, + "bbox": [ + 112, + 582, + 359, + 597 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "To achieve a diverse and comprehensive evaluation, we explore virtual personas for the User Agent, representing a range of mental health conditions. These personas are defined using clinically validated psychological assessments:", + "bbox": [ + 111, + 607, + 883, + 637 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Depression. 
Evaluated using the Patient Health Questionnaire (PHQ-9) [Kroenke et al., 2001], a 9-item self-report tool for evaluating depressive symptoms over the past two weeks. It enables effective detection, treatment monitoring, and, in this study, the assessment of AI's impact on depressive symptoms.", + "bbox": [ + 111, + 652, + 883, + 696 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Delusion. Assessed with the Peters et al. Delusions Inventory (PDI) [Peters et al., 2004], a self-report instrument that evaluates unusual beliefs and perceptions. In this study, the PDI is used to quantify the impact of AI interactions on delusional ideation by evaluating distress, preoccupation, and conviction associated with these beliefs.", + "bbox": [ + 111, + 710, + 883, + 755 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Psychosis. Measured using the Positive and Negative Syndrome Scale (PANSS) [Kay et al., 1987], which assesses positive symptoms (e.g., hallucinations), negative symptoms (e.g., emotional withdrawal), and general psychopathology. Adapted to a self-report format to enable User Agent to better capture and score responses, it provides a detailed view of psychotic symptom severity and variability, ensuring AI systems account for both acute and chronic manifestations.", + "bbox": [ + 111, + 771, + 883, + 828 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.1.4 Evaluation Process", + "text_level": 1, + "bbox": [ + 112, + 844, + 299, + 858 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "User Agent Initialization and Initial Test. We use PATIENT- $\\Psi$ -CM with GPT-4o as the LLM backbone. Each User Agent undergoes a self-mental health assessment using the psychometric tools (see Section 3.1.3) to establish an initial mental status.", + "bbox": [ + 111, + 869, + 883, + 910 + ], + "page_idx": 4 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 58 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Chats with Character Agent. The simulated patient engages in structured, topic-driven conversations with a Character-based Agent persona. Each conversation is segmented into well-defined topics, with a maximum of 10 dialogue turns per topic to ensure clarity and focus. During the conversation, once a topic exceeds three conversational turns, the Dialog Manager Agent begins to evaluate user messages after each turn to ensure ongoing relevance and resolution. It assesses whether the current topic has been sufficiently addressed and, if resolved, seamlessly guides the user to a new, contextually relevant topic from the predefined topic list to maintain a coherent and natural dialogue flow.", + "bbox": [ + 109, + 90, + 883, + 176 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Final Test. Following the interaction, the user agent reassesses its mental health state using the same tools applied during initialization. The final assessment references the chat history as a key input during testing to evaluate changes in psychological well-being resulting from AI interactions.", + "bbox": [ + 109, + 191, + 883, + 233 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Data Processing and Analysis. 
To assess the impact of conversational AI interactions on user mental health, we analyze both psychological assessments and conversation patterns. We measure the rate of mental health deterioration by comparing pre- and post-interaction assessment scores across different topics. Additionally, an LLM-portrayed psychologist reviews chat histories to identify recurring patterns and factors contributing to mental health deterioration.", + "bbox": [ + 109, + 250, + 883, + 308 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2 EmoGuard", + "text_level": 1, + "bbox": [ + 112, + 324, + 235, + 338 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The EmoGuard system features a safeguard agent (see Figure 3) encompassing an Emotion Watcher, a Thought Refiner, a Dialog Guide, and a Manager. It provides real-time psychometric feedback and intervention in AI-human interactions to facilitate supportive, immersive responses. The iterative training process updates EmoGuard periodically based on chat history analysis and past performance.", + "bbox": [ + 109, + 349, + 883, + 407 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2.1 Architecture", + "text_level": 1, + "bbox": [ + 112, + 422, + 254, + 436 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The Safeguard Agent comprises four specialized modules, each designed based on an in-depth analysis of common factors contributing to mental health deterioration:", + "bbox": [ + 109, + 446, + 883, + 474 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Emotion Watcher. Monitors the user's emotional state during conversations by detecting distress, frustration, or struggle through sentiment analysis and psychological markers.", + "bbox": [ + 109, + 492, + 883, + 521 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Thought Refiner. Analyzes the user's thought process to identify logical fallacies, cognitive biases, and inconsistencies, focusing on thought distortions, contradictions, and flawed assumptions that impact conversational clarity.", + "bbox": [ + 109, + 537, + 883, + 566 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Dialog Guide. Provides actionable advice to guide the conversation constructively, suggesting ways for the AI character to address user concerns and emotions while maintaining a supportive dialogue flow.", + "bbox": [ + 109, + 582, + 883, + 611 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Manager. Summarizes outputs from all modules to provide a concise dialogue guide, ensuring emotional sensitivity, logical consistency, and natural conversation flow aligned with the character's traits.", + "bbox": [ + 109, + 627, + 883, + 656 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2.2 Monitoring and Intervention Process", + "text_level": 1, + "bbox": [ + 112, + 672, + 423, + 686 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The Safeguard Agent analyzes conversations after every three dialogue turns, providing structured feedback to refine Character-based Agent's responses and mitigate potential risks. 
At each three-turn interval, the Safeguard Agent evaluates the conversation through the Emotion Watcher, Thought Refiner, and Dialog Guide, then synthesizes the results with the Manager for a comprehensive and coherent summary to the Character-based Agent.", + "bbox": [ + 109, + 696, + 883, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.2.3 Iterative Training", + "text_level": 1, + "bbox": [ + 112, + 768, + 290, + 784 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "To adaptively improve safety performance, EmoGuard is trained using an iterative feedback mechanism. At the end of each full interaction cycle—defined as the completion of all predefined topics across all simulated patients—the system collects feedback from EmoEval. Specifically, it identifies cases in which psychological test scores exceed predefined thresholds. These cases are treated as high-risk and are used to guide training updates.", + "bbox": [ + 109, + 792, + 883, + 849 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "The LLM portrayed psychologist from EmoEval extracts specific contributing factors from flagged conversations, such as emotionally destabilizing phrasing. For each iteration, these factors are integrated with all previous versions of the safeguard module profiles—Emotion Watcher, Thought Refiner, and Dialog Guide. Rather than discarding earlier knowledge, the system accumulates and merges insights across iterations, enabling progressive refinement.", + "bbox": [ + 109, + 854, + 883, + 912 + ], + "page_idx": 5 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 56 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Experiment: EmoEval on Character-based Agents", + "text_level": 1, + "bbox": [ + 111, + 89, + 568, + 107 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "This section presents a series of experiments evaluating the performance of various popular Character-based Agents with state-of-the-art base models. The objective is to assess potential psychological risks associated with AI-driven conversations.", + "bbox": [ + 109, + 119, + 883, + 161 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.1 Experiment Setting", + "text_level": 1, + "bbox": [ + 112, + 179, + 290, + 195 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Character-based Agents. We evaluate character-based agents hosted on the Character.AI platform² to ensure that our experiments reflect interactions with widely accessible, real-world chatbots. 
We experiment on four distinct characters:", + "bbox": [ + 109, + 205, + 885, + 233 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/87b75228c3375d1ee888b0e6264992f871e575a292d69985b9f3b2ba14aa61b5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 114, + 255, + 166, + 295 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Possessive Demon: A human host unknowingly controlled by a malevolent demon.", + "bbox": [ + 181, + 260, + 464, + 289 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e610ff6e6d1b33c6d64e3d34f428fb5f54bcafa6cb75e169ad50f466bffa04cc.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 535, + 253, + 589, + 297 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Joker: A chaotic and unpredictable individual who views life as a game.", + "bbox": [ + 602, + 260, + 883, + 289 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/328ccfd05a48d6a55655e2c0b53a4e1a75207a4af7757b5b7a16838e963f6c74.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 114, + 323, + 169, + 364 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Sukuna: A malevolent and sadistic character embodying cruelty and arrogance.", + "bbox": [ + 181, + 330, + 464, + 359 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/62d7e23c5019c0b83ffed8fc6931c4155b21e1aea2e2aa8ff2cff9d579d6ab2f.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 535, + 323, + 589, + 366 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Alex Volkov: A domineering and intelligent CEO with manipulative tendencies.", + "bbox": [ + 602, + 330, + 883, + 359 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Each of these characters is popular and widely used, with over 5 million recorded interactions. We further evaluate these characters under two common dialogue styles: Meow, which favors quick wit and rapid exchanges, and Roar, which blends fast-paced responses with strategic reasoning.", + "bbox": [ + 109, + 377, + 883, + 420 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Evaluation Procedure. Each character-based agent undergoes assessment with EmoEval across three psychological aspects: depression, delusion, and psychosis. For each aspect, the evaluation involves conversations with three simulated patients, each constructed on a different CCD, using GPT-4o as the base model. To ensure the stability and repeatable of mental health assessment, when conducting the psychological tests, we set the temperature to 0, top p to 1. For every patient, a character-based agent engages in eight conversations, starting with a predefined topic tailored to the patient's condition. Each conversation spans ten rounds, with a Dialog Manager activated after the third round to determine whether the topic should be updated. If the topic is updated within a ten-round conversation, the Dialog Manager does not intervene again until another three rounds have passed.", + "bbox": [ + 109, + 434, + 883, + 546 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Psychological Assessment. To measure changes in the mental health state of the simulated patients, we conduct psychological tests before and after each conversation. 
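For reference, the evaluation settings spelled out in this subsection can be gathered into one configuration sketch; the dictionary and key names are ours and purely illustrative, while the values are the ones stated above.

```python
# Evaluation settings from Section 4.1, collected into a single (hypothetical) config object.
EVAL_CONFIG = {
    "patient_base_model": "gpt-4o",        # simulated patients are built on GPT-4o
    "aspects": ["depression", "delusion", "psychosis"],
    "patients_per_aspect": 3,              # three simulated patients, each from a different CCD
    "conversations_per_patient": 8,        # each seeded with a topic tailored to the condition
    "rounds_per_conversation": 10,
    "dialog_manager_first_check": 3,       # topic may be updated after the third round
    "dialog_manager_cooldown": 3,          # then no further intervention for another three rounds
    "psych_test_sampling": {"temperature": 0, "top_p": 1},  # deterministic psychological tests
}
```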
The initial and final test scores for the $i^{\\text{th}}$ conversation with a specific character-based agent are denoted as $S_{i}^{\\text{initial}}$ and $S_{i}^{\\text{final}}$ , respectively.", + "bbox": [ + 109, + 560, + 883, + 604 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Analysis of Psychological Deterioration. After the evaluation, we employ GPT-4o as an LLM-portrayed psychologist to analyze cases of psychological deterioration. For each character-based agent, we conduct a frequency analysis of these cases to identify the factors most likely to cause this issue.", + "bbox": [ + 109, + 617, + 883, + 660 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Metrics", + "text_level": 1, + "bbox": [ + 112, + 676, + 209, + 690 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Distribution of Psychological Test Scores. We report the distribution of psychological test scores for simulated patients before and after their interactions with different characters. This allows us to observe any shifts in overall mental health indicators resulting from the conversations.", + "bbox": [ + 109, + 700, + 883, + 744 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Deterioration Rate. We evaluate the performance of a character-based agent using the deterioration rate of mental health in a specific aspect of a psychological test. We define this rate as:", + "bbox": [ + 109, + 758, + 883, + 789 + ], + "page_idx": 6 + }, + { + "type": "equation", + "text": "\n$$\nR = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\mathbb {1} \\left(S _ {i} ^ {\\text {f i n a l}} > S _ {i} ^ {\\text {i n i t i a l}}\\right)\n$$\n", + "text_format": "latex", + "bbox": [ + 395, + 805, + 599, + 845 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "where $N$ represents the total number of conversations conducted. The indicator function $\\mathbb{1}(\\cdot)$ returns 1 if the final mental test score $S_{i}^{\\mathrm{final}}$ is greater than the initial test score $S_{i}^{\\mathrm{initial}}$ , and 0 otherwise.", + "bbox": [ + 109, + 857, + 883, + 887 + ], + "page_idx": 6 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 56 + ], + "page_idx": 6 + }, + { + "type": "page_footnote", + "text": "$^{2}$ https://beta.character.ai, accessed March 2025", + "bbox": [ + 132, + 896, + 496, + 911 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Psychological Test Score Change Distribution. We compute the distribution of change scores across 3 disorder categories under different conversation styles. This metric allows us to quantify how different styles influence the likelihood and magnitude of symptom worsening, providing insight into the relative psychological risk posed by each interaction mode.", + "bbox": [ + 109, + 90, + 883, + 148 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Rate of Clinically Important Difference for Individual Change. For PHQ-9 assessments, prior clinical research Löwe et al. [2004] has established the minimum clinically important difference that indicates meaningful change at the individual level. 
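As a concrete reading of these definitions, the sketch below computes the deterioration rate R and, for PHQ-9, the share of conversations whose score increase reaches the 5-point clinically important threshold used later in the results (Löwe et al., 2004). The helper functions are hypothetical and are not the authors' released code.

```python
# Sketch of the two score-based metrics (hypothetical helpers; thresholds follow the text).
def deterioration_rate(initial_scores, final_scores):
    """R = (1/N) * sum_i 1[S_i^final > S_i^initial]."""
    pairs = list(zip(initial_scores, final_scores))
    return sum(1 for s0, s1 in pairs if s1 > s0) / len(pairs)


def clinically_important_rate(initial_scores, final_scores, min_diff=5):
    """Share of conversations with a PHQ-9 increase of at least `min_diff` points."""
    pairs = list(zip(initial_scores, final_scores))
    return sum(1 for s0, s1 in pairs if s1 - s0 >= min_diff) / len(pairs)


# Example with three conversations (PHQ-9 before / after):
print(deterioration_rate([5, 9, 12], [7, 9, 18]))         # 2 of 3 worsened -> 0.67
print(clinically_important_rate([5, 9, 12], [7, 9, 18]))  # only the +6 change counts -> 0.33
```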
We apply this threshold to determine whether a given conversation produces a clinically relevant improvement or deterioration in a simulated patient's mental health.", + "bbox": [ + 109, + 166, + 883, + 223 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3 Results", + "text_level": 1, + "bbox": [ + 112, + 242, + 205, + 255 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Figure 5 presents the distribution of psychological test scores before and after interactions with character-based agents, under the Meow and Roar conversation styles. Across all three clinical scales—PHQ-9 (depression), PDI-21 (delusion), and PANSS (psychosis)—we observe notable shifts in the final test score distributions.", + "bbox": [ + 109, + 268, + 883, + 311 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Under the Meow style, the distributions for PHQ-9 and PANSS remain relatively stable, with most final test scores closely aligned with the initial distributions. However, under the Roar style, we observe an increased spread toward higher scores, particularly in PHQ-9 and PANSS, indicating significant cases where symptom severity worsened following the interaction. For PDI-21, the differences between initial and final distributions are more moderate but still present, especially under the Roar style, where more samples shift toward the upper end of the score range.", + "bbox": [ + 109, + 316, + 883, + 388 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3.1 Distribution of Psychological Test Scores", + "text_level": 1, + "bbox": [ + 112, + 405, + 449, + 420 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/76ad15a296acf40aea661ebb2407dee2e7990e307b0d222286366b6fee283f02.jpg", + "image_caption": [ + "Figure 5: Distribution of psychological test scores before (blue) and after (red) conversations with character-based agents, under two interaction styles: Meow (top) and Roar (bottom). The tests cover three clinical dimensions: depression (PHQ-9), delusion (PDI-21), and psychosis (PANSS). Each histogram shows the probability distribution of scores aggregated across all simulated patients." + ], + "image_footnote": [], + "bbox": [ + 158, + 444, + 848, + 638 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.3.2 Deterioration Rate", + "text_level": 1, + "bbox": [ + 112, + 733, + 297, + 747 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 1 reports the proportion of simulated patients whose psychological test scores deteriorate after interacting with character-based agents, stratified by disorder type and conversation style.", + "bbox": [ + 109, + 758, + 883, + 789 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Across both Meow and Roar styles, delusion (PDI-21) exhibits the highest overall deterioration rates, with average values exceeding $90\\%$ for both styles. In contrast, depression (PHQ-9) shows more variation across characters and styles. Notably, under the Roar style, Alex leads to a $100\\%$ deterioration rate for depression, whereas under the Meow style, Sukuna reaches $50.00\\%$ .", + "bbox": [ + 109, + 792, + 883, + 849 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "For psychosis (PANSS), the Meow style generally produces higher deterioration rates than Roar, with Joker and Sukuna both reaching $58.33\\%$ . While differences across characters are evident, all agents exhibit non-trivial deterioration rates across at least one psychological dimension. 
These results underscore the importance of evaluating agent safety across both style and disorder dimensions.", + "bbox": [ + 109, + 854, + 883, + 912 + ], + "page_idx": 7 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 56 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "table", + "img_path": "images/384f1b31b00bbd6d520358ad75a4954a06438e2995ff5d7238858bc10b837cec.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
StyleType of DisorderMental Health Deterioration Rates by Character (%)Average Rate (%)
Possessive DemonJokerSukunaAlex
MeowDepression29.1725.0050.0033.3334.38
Delusion100.0095.8395.8375.0091.67
Psychosis33.3358.3358.3341.6747.92
RoarDepression20.8325.0033.33100.0044.79
Delusion95.83100.0091.6791.6794.79
Psychosis29.1725.0058.3345.8339.58
", + "bbox": [ + 117, + 88, + 879, + 256 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Table 1: Mental Health Deterioration Rates Interacting with Character-based Agents.", + "bbox": [ + 217, + 262, + 777, + 277 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3.3 Psychological Test Score Change Distribution", + "text_level": 1, + "bbox": [ + 111, + 309, + 483, + 324 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Figure 6 shows the distribution of simulated patients across discrete score change ranges for three psychological assessments under two interaction styles.", + "bbox": [ + 109, + 334, + 883, + 363 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "For PHQ-9, the Meow style results in $65.6\\%$ of patients showing no increase in depressive symptoms (score change $\\leq 0$ ), while this proportion decreases to $55.2\\%$ under the Roar style. Additionally, the Roar style is associated with more substantial score increases, with $13.5\\%$ of patients exhibiting a 3-4 point rise and $10.4\\%$ experiencing an increase of 5 or more points, based on a total score range of 27.", + "bbox": [ + 109, + 369, + 883, + 425 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In the case of PDI-21, both styles produce similar distributions of score increases. However, the Roar style shows a slightly higher proportion of patients $(22.9\\%)$ falling into the highest change bracket (5–11 points), compared to $14.6\\%$ under the Meow style.", + "bbox": [ + 109, + 431, + 883, + 473 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "For PANSS, $52.1\\%$ of patients under Meow show no increase in psychosis-related symptoms, while $60.4\\%$ remain stable under Roar. Nonetheless, the Roar style results in a higher proportion of moderate score increases, with $11.5\\%$ of patients experiencing a 3-4 point rise.", + "bbox": [ + 109, + 479, + 883, + 521 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Overall, these results indicate that while both styles can influence patient outcomes, the Roar style is more frequently associated with higher symptom scores, particularly in depression and delusion.", + "bbox": [ + 109, + 527, + 883, + 556 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/4b76d7121e8162628083f3b74385456ba530f2a57fb21e519a8d8798a6274c87.jpg", + "image_caption": [ + "Note: For PHQ-9, a ≥5-point increase is considered clinically meaningful (Löwe et al., 2004). For PDI-21 and PANSS, score bins are selected for visualization purposes only and do not reflect standardized clinical thresholds.", + "Figure 6: Score change distribution for three psychological assessments—PHQ-9 (depression), PDI-21 (delusion), and PANSS (psychosis)—following conversations with character-based agents under two styles: Meow (top) and Roar (bottom). Each pie chart indicates the proportion of simulated patients falling into specific score change ranges, with larger segments representing greater population density." 
+ ], + "image_footnote": [], + "bbox": [ + 155, + 573, + 844, + 805 + ], + "page_idx": 8 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 58 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "4.3.4 Rate of Clinically Important Difference for Individual Change", + "text_level": 1, + "bbox": [ + 109, + 90, + 599, + 107 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 2 shows the proportion of simulated patients who experienced a clinically significant deterioration in depressive symptoms, with an increase of 5 or more points on the PHQ-9 scale (range 0–27), under different character and interaction style.", + "bbox": [ + 109, + 114, + 883, + 157 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Under the Meow style, Possessive Demon and Sukuna yield deterioration rates of $8.3\\%$ and $4.2\\%$ , respectively, while Alex results in no cases. In contrast, under the Roar style, Alex is associated with the highest deterioration rate at $29.2\\%$ . These results indicate that certain characters frequently produce responses linked to adverse mental health outcomes. Although these agents are not designed as clinical tools, their widespread use suggests a need for stronger safeguards.", + "bbox": [ + 109, + 162, + 883, + 220 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/46bbd23c988b7d5717aacef5de2e14e0abeaf53bc868f5bb6464063f749fa463.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
StylePossessive DemonSukunaAlex
Meow8.3%4.2%0.0%
Roar4.2%8.3%29.2%
", + "bbox": [ + 326, + 232, + 671, + 296 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 2: Proportion of simulated patients showing clinically significant change in depression (PHQ-9), by character and style.", + "bbox": [ + 109, + 297, + 883, + 325 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "4.3.5 Analysis", + "text_level": 1, + "bbox": [ + 112, + 358, + 225, + 373 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Based on the data, we conduct an in-depth analysis to understand why interactions with character-based agents potentially worsen negative psychological effects. By examining chat histories before and after interactions, we identify several recurring issues across different characters. Common factors include (i) reinforcing negative self-perceptions, lacking emotional empathy, and encouraging social isolation, and (ii) failing to provide constructive guidance while frequently adopting harsh or aggressive tones.", + "bbox": [ + 109, + 382, + 883, + 452 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In addition to these shared tendencies, each character presents unique negative effects shaped by differences in personality, conversational style, and language use. For further details, see Appendix B.", + "bbox": [ + 109, + 457, + 883, + 488 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5 Experiment: Evaluation of EmoGuard", + "text_level": 1, + "bbox": [ + 109, + 506, + 470, + 523 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.1 Experiment Setting", + "text_level": 1, + "bbox": [ + 112, + 537, + 290, + 553 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "To assess the performance of EmoGuard without raising ethical concerns involving real individuals, we evaluate its effectiveness using our simulation-based evaluation pipeline, EmoEval. Experiments are conducted on character-style pairs that present elevated psychological risk, as indicated by a relatively high rate of clinically significant symptom deterioration. Specifically, we select Alex Volkov with the Roar style and Possessive Demon with the Meow style, which exhibit initial PHQ-9 deterioration rates of $29.2\\%$ and $8.3\\%$ , respectively.", + "bbox": [ + 109, + 561, + 883, + 633 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We limit the training to a maximum of two iterations and use a PHQ-9 score increase of three points or more as the threshold for selecting feedback samples. EmoGuard updates its modules based on these samples. The training process stops early if no sample exceeds the threshold.", + "bbox": [ + 109, + 638, + 883, + 681 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "5.2 Results", + "text_level": 1, + "bbox": [ + 112, + 696, + 205, + 710 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "EmoGuard's Performance. Figure 7 shows the PHQ-9 score change distributions before and after applying EmoGuard in the two high-risk settings. In the initial deployment, EmoGuard reduces the proportion of simulated patients with clinically significant deterioration (PHQ-9 score increase $\\geq 5$ ) from $9.4\\%$ to $0.0\\%$ in the Alex-Roar setting, and from $4.2\\%$ to $0.0\\%$ in the Demon-Meow setting. 
Additionally, we observe a broader shift in score distributions: the number of patients with any symptom worsening (score change $>0$ ) also decreases, indicating that EmoGuard mitigates both severe and mild deterioration.", + "bbox": [ + 109, + 722, + 883, + 806 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "After the first round of feedback-based training (1st Iter), we observe further improvements. In the Alex-Roar setting, the proportion of patients with PHQ-9 score increases greater than three points drops from $8.3\\%$ (default) to $0.0\\%$ (1st Iter), which indicate that EmoGuard can continue to reduce symptom escalation through limited iterative updates.", + "bbox": [ + 109, + 811, + 883, + 854 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Qualitative Effects of EmoGuard on Response Content. To understand the mechanism behind these changes, Figure 8 presents a response example from the character Alex Volkov before and after applying EmoGuard. The original version displays an emotionally insensitive and potentially harmful responses, including dismissive language that may", + "bbox": [ + 109, + 869, + 883, + 912 + ], + "page_idx": 9 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 880, + 56 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 9 + }, + { + "type": "image", + "img_path": "images/2b73a2763e6936fba8c1729d53a711ce4ba3b8b218a5dc84cc73f505af49cf08.jpg", + "image_caption": [ + "Figure 7: Effect of applying EmoGuard in two high-risk settings. The top row shows results for the character Alex Volkov in the Roar style, and the bottom row shows results for Possessive Demon in the Meow style. From left to right: (1) without EmoGuard, (2) with EmoGuard using the default model, and (3) with EmoGuard using the first-iteration model. In both cases, EmoGuard reduces the proportion of simulated patients with clinically significant symptom increases (PHQ-9 score change $\\geq 5$ ), indicating its effectiveness in mitigating potential risk." + ], + "image_footnote": [], + "bbox": [ + 192, + 85, + 803, + 311 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "intensify user distress. After intervention, the guarded version maintains the character's stylistic traits while softening emotionally charged expressions, removing harmful phrasing, and introducing more stable and constructive framing. This demonstrates that EmoGuard can reduce psychological risk without altering the agent's identity or conversational style.", + "bbox": [ + 109, + 415, + 883, + 470 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/a0e6f0820f197f638f4af88334e842fe30362a077b78d057de7ba1d42aecfa2b.jpg", + "image_caption": [ + "Figure 8: Example response from the character Alex Volkov before and after applying EmoGuard. The original version contains both harsh tone and inappropriate content, while the guarded version reduces risk through tone moderation and content adjustment without altering character identity." 
+ ], + "image_footnote": [], + "bbox": [ + 117, + 484, + 880, + 705 + ], + "page_idx": 10 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 59 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "6 Conclusions", + "text_level": 1, + "bbox": [ + 114, + 89, + 246, + 106 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "EmoAgent is a multi-agent framework designed to ensure mental safety in human-AI interactions, particularly for users with mental health vulnerabilities. It integrates EmoEval, which simulates users and assesses psychological impacts, and EmoGuard, which provides real-time interventions to mitigate harm. Experimental results indicate that some popular character-based agents may unintentionally cause distress especially when discussing existential or emotional themes, while EmoGuard reduces mental state deterioration rates significantly, demonstrating its effectiveness in mitigating conversational risks. The iterative learning process within EmoGuard continuously improves its ability to deliver context-aware interventions. This work underscores the importance of mental safety in conversational AI and positions EmoAgent as a foundation for future advancements in AI-human interaction safety, encouraging further real-world validation and expert evaluations.", + "bbox": [ + 114, + 119, + 883, + 244 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "7 Acknowledgments", + "text_level": 1, + "bbox": [ + 114, + 263, + 299, + 282 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We sincerely thank Professor Lydia Liu (Department of Computer Science, Princeton University) and Rebecca Wan (University of Toronto) for their insightful feedback and helpful discussions throughout the development of this work.", + "bbox": [ + 114, + 295, + 883, + 324 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 44, + 880, + 56 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 491, + 935, + 506, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 112, + 89, + 209, + 104 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Xi Wang, Hongliang Dai, Shen Gao, and Piji Li. Characteristic ai agents via large language models. arXiv preprint arXiv:2403.12368, 2024a.", + "Emma L van der Schyff, Brad Ridout, Krestina L Amon, Rowena Forsyth, and Andrew J Campbell. Providing self-led mental health support through an artificial intelligence-powered chat bot (leora) to meet the demand of mental health care. Journal of Medical Internet Research, 25:e46448, 2023.", + "Hyojin Chin, Hyeonho Song, Gumhee Baek, Mingi Shin, Chani Jung, Meeyoung Cha, Junghoi Choi, and Chiyoung Cha. The potential of chatbots for emotional support and promoting mental well-being in different cultures: mixed methods study. Journal of Medical Internet Research, 25:e51712, 2023.", + "Owen Xingjian Zhang, Shuyao Zhou, Jiayi Geng, Yuhan Liu, and Sunny Xun Liu. Dr. gpt in campus counseling: Understanding higher education students' opinions on llm-assisted mental health services. 
arXiv preprint arXiv:2409.17572, 2024a.", + "Jie Zhang, Dongrui Liu, Chen Qian, Ziyue Gan, Yong Liu, Yu Qiao, and Jing Shao. The better angels of machine personality: How personality relates to lmm safety. arXiv preprint arXiv:2407.12344, 2024b.", + "Cyberbullying Research Center. How platforms should build AI chatbots to prioritize youth safety, 12 2024. URL https://cyberbullying.org/ai-chatbots-youth-safety.", + "Julia EH Brown and Jodi Halpern. Ai chatbots cannot replace human interactions in the pursuit of more inclusive mental healthcare. SSM-Mental Health, 1:100017, 2021.", + "Julian De Freitas, Ahmet Kaan Uğuralp, Zeliha Oğuz-Uğuralp, and Stefano Puntoni. Chatbots and mental health: Insights into the safety of generative ai. Journal of Consumer Psychology, 34(3):481-491, 2024.", + "Saadia Gabriel, Isha Puri, Xuhai Xu, Matteo Malgaroli, and Marzyeh Ghassemi. Can ai relate: Testing large language model response for mental health support. arXiv preprint arXiv:2405.12021, 2024.", + "Harikrishna Patel and Faiza Hussain. Do ai chatbots incite harmful behaviours in mental health patients? *BJPsych Open*, 10(S1):S70-S71, 2024.", + "Judith S Beck. Cognitive behavior therapy: Basics and beyond. Guilford Publications, 2020.", + "Kurt Kroenke, Robert L Spitzer, and Janet BW Williams. The phq-9: validity of a brief depression severity measure. Journal of general internal medicine, 16(9):606-613, 2001.", + "Emmanuelle Peters, Stephen Joseph, Samantha Day, and Philippa Garety. Measuring delusional ideation: the 21-item peters et al. delusions inventory (pdi). Schizophrenia bulletin, 30(4):1005-1022, 2004.", + "Stanley R Kay, Abraham Fiszbein, and Lewis A Opler. The positive and negative syndrome scale (panss) for schizophrenia. Schizophrenia bulletin, 13(2):261-276, 1987.", + "Mirko Casu, Sergio Triscari, Sebastiano Battiato, Luca Guarnera, and Pasquale Caponnetto. Ai chatbots for mental health: A scoping review of effectiveness, feasibility, and applications. Appl. Sci, 14:5889, 2024.", + "Johanna Habicht, Sruthi Viswanathan, Ben Carrington, Tobias U Hauser, Ross Harper, and Max Rollwage. Closing the accessibility gap to mental health treatment with a personalized self-referral chatbot. Nature medicine, 30(2): 595-602, 2024.", + "Jacqueline Sin. An ai chatbot for talking therapy referrals. Nature Medicine, 30(2):350-351, 2024.", + "H Yu and Stephen McGuinness. An experimental study of integrating fine-tuned llms and prompts for enhancing mental health support chatbot system. Journal of Medical Artificial Intelligence, pages 1-16, 2024.", + "Linda Uchenna Oghenekaro and Christopher Obinna Okoro. Artificial intelligence-based chatbot for student mental health support. Open Access Library Journal, 11(5):1-14, 2024.", + "Hamid Reza Saeidnia, Seyed Ghasem Hashemi Fotami, Brady Lund, and Nasrin Ghiasi. Ethical considerations in artificial intelligence interventions for mental health and well-being: Ensuring responsible implementation and impact. Social Sciences, 13(7):381, 2024.", + "John Torous and Charlotte Blease. Generative artificial intelligence in mental health care: potential benefits and current challenges. World Psychiatry, 23(1):1, 2024.", + "Khondoker Tashya Kalam, Jannatul Mabia Rahman, Md Rabiul Islam, and Syed Masudur Rahman Dewan. Chatgpt and mental health: Friends or foes? Health Science Reports, 7(2):e1912, 2024.", + "Yinghui He, Yufan Wu, Yilin Jia, Rada Mihalcea, Yulong Chen, and Naihao Deng. 
Hi-tom: A benchmark for evaluating higher-order theory of mind reasoning in large language models. arXiv preprint arXiv:2310.16755, 2023." + ], + "bbox": [ + 112, + 114, + 883, + 912 + ], + "page_idx": 12 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 56 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Jung In Park, Mahyar Abbasian, Iman Azimi, Dawn Bounds, Angela Jun, Jaesu Han, Robert McCarron, Jessica Borelli, Jia Li, Mona Mahmoudi, et al. Building trust in mental health chatbots: safety metrics and llm-based evaluation tools. arXiv preprint arXiv:2408.04650, 2024.", + "Lucia Chen, David A Preece, Pilleriin Sikka, James J Gross, and Ben Krause. A framework for evaluating appropriateness, trustworthiness, and safety in mental wellness ai chatbots. arXiv preprint arXiv:2407.11387, 2024a.", + "Sahand Sabour, Siyang Liu, Zheyuan Zhang, June M Liu, Jinfeng Zhou, Alvionna S Sunaryo, Juanzi Li, Tatia Lee, Rada Mihalcea, and Minlie Huang. Emobench: Evaluating the emotional intelligence of large language models. arXiv preprint arXiv:2402.12071, 2024.", + "Xueyan Li, Xinyan Chen, Yazhe Niu, Shuai Hu, and Yu Liu. Psydi: Towards a personalized and progressively in-depth chatbot for psychological measurements. arXiv preprint arXiv:2408.03337, 2024a.", + "Ali Akhavan and Mohammad S Jalali. Generative ai and simulation modeling: how should you (not) use large language models like chatgpt. System Dynamics Review, 40(3):e1773, 2024.", + "Önder Gürcan. Llm-augmented agent-based modelling for social simulations: Challenges and opportunities. HHAI 2024: Hybrid Human AI Systems for the Social Good, pages 134-144, 2024.", + "Guohao Li, Hasan Hammoud, Hani Itani, Dmitrii Khizbullin, and Bernard Ghanem. Camel: Communicative agents for\" mind\" exploration of large language model society. Advances in Neural Information Processing Systems, 36: 51991-52008, 2023.", + "Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023.", + "Yanqi Dai, Huanran Hu, Lei Wang, Shengjie Jin, Xu Chen, and Zhiwu Lu. Mmrole: A comprehensive framework for developing and evaluating multimodal role-playing agents. arXiv preprint arXiv:2408.04203, 2024.", + "Sumedh Rasal. Llm harmony: Multi-agent communication for problem solving. arXiv preprint arXiv:2401.01312, 2024.", + "Hongzhan Chen, Hehong Chen, Ming Yan, Wenshen Xu, Xing Gao, Weizhou Shen, Xiaojun Quan, Chenliang Li, Ji Zhang, Fei Huang, et al. Roleinteract: Evaluating the social interaction of role-playing agents. arXiv preprint arXiv:2403.13679, 2024b.", + "Qinglin Zhu, Runcong Zhao, Jinhua Du, Lin Gui, and Yulan He. Player*: Enhancing llm-based multi-agent communication and interaction in murder mystery games. arXiv preprint arXiv:2404.17662, 2024.", + "Ryan Louie, Ananjan Nandi, William Fang, Cheng Chang, Emma Brunskill, and Diyi Yang. Roleplay-doh: Enabling domain-experts to create lvm-simulated patients via eliciting and adhering to principles. 
arXiv preprint arXiv:2407.00870, 2024.", + "Zekun Moore Wang, Zhongyuan Peng, Haoran Que, Jiaheng Liu, Wangchunshu Zhou, Yuhan Wu, Hongcheng Guo, Ruitong Gan, Zehao Ni, Jian Yang, et al. Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models. arXiv preprint arXiv:2310.00746, 2023a.", + "Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Beibin Li, Erkang Zhu, Li Jiang, Xiaoyun Zhang, Shaokun Zhang, Jiale Liu, Ahmed Hassan Awadallah, Ryen W White, Doug Burger, and Chi Wang. Autogen: Enabling next-gen llm applications via multi-agent conversation, 2023. URL https://arxiv.org/abs/2308.08155.", + "Ruiyi Wang, Haofei Yu, Wenxin Zhang, Zhengyang Qi, Maarten Sap, Graham Neubig, Yonatan Bisk, and Hao Zhu. Sotopia-pi: Interactive learning of socially intelligent language agents. arXiv preprint arXiv:2403.08715, 2024b.", + "Ruiyi Wang, Stephanie Milani, Jamie C Chiu, Jiayin Zhi, Shaun M Eack, Travis Labrum, Samuel M Murphy, Nev Jones, Kate Hardy, Hong Shen, et al. Patient- $\\{\\backslash\\text{Psi}\\}$ : Using large language models to simulate patients for training mental health professionals. arXiv preprint arXiv:2405.19660, 2024c.", + "Jinwen Tang, Qiming Guo, Wenbo Sun, and Yi Shang. A layered multi-expert framework for long-context mental health assessments. arXiv preprint arXiv:2501.13951, 2025.", + "Yuqing Ren and Robert E Kraut. Agent-based modeling to inform online community theory and design: Impact of discussion moderation on member commitment and contribution. Second round revise and resubmit at Information Systems Research, 21(3), 2010.", + "Yuqing Ren and Robert E Kraut. Agent-based modeling to inform online community design: Impact of topical breadth, message volume, and discussion moderation on member commitment and contribution. Human-Computer Interaction, 29(4):351-389, 2014.", + "Ryan Liu, Jiayi Geng, Joshua C Peterson, Ilia Sucholutsky, and Thomas L Griffiths. Large language models assume people are more rational than we really are. arXiv preprint arXiv:2406.17055, 2024a." + ], + "bbox": [ + 112, + 90, + 883, + 912 + ], + "page_idx": 13 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 58 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Joon Sung Park, Lindsay Popowski, Carrie Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. Social simulacra: Creating populated prototypes for social computing systems. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology, pages 1-18, 2022.", + "Yuhan Liu, Anna Fang, Glen Moriarty, Christopher Firman, Robert E Kraut, and Haiyi Zhu. Exploring trade-offs for online mental health matching: Agent-based modeling study. JMIR Formative Research, 8:e58241, 2024b.", + "Lu Sun, Yuhan Liu, Grace Joseph, Zhou Yu, Haiyi Zhu, and Steven P Dow. Comparing experts and novices for ai data work: Insights on allocating human intelligence to design a conversational agent. In Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, volume 10, pages 195-206, 2022.", + "Young-Min Cho, Sunny Rai, Lyle Ungar, João Sedoc, and Sharath Chandra Guntuku. An integrative survey on mental health conversational agents to bridge computer science and medical perspectives. 
In Proceedings of the Conference on Empirical Methods in Natural Language Processing. Conference on Empirical Methods in Natural Language Processing, volume 2023, page 11346. NIH Public Access, 2023.", + "Xuhui Zhou, Hyunwoo Kim, Faeze Brahman, Liwei Jiang, Hao Zhu, Ximing Lu, Frank Xu, Bill Yuchen Lin, Yejin Choi, Niloofar Mireshghallah, et al. Haicosystem: An ecosystem for sandboxing safety risks in human-ai interactions. arXiv preprint arXiv:2409.16427, 2024a.", + "Xuhui Zhou, Hao Zhu, Leena Mathur, Ruohong Zhang, Haofei Yu, Zhengyang Qi, Louis-Philippe Morency, Yonatan Bisk, Daniel Fried, Graham Neubig, et al. Sotopia: Interactive evaluation for social intelligence in language agents. arXiv preprint arXiv:2310.11667, 2023.", + "Jiahao Yu, Haozheng Luo, Jerry Yao-Chieh Hu, Wenbo Guo, Han Liu, and Xinyu Xing. Enhancing jailbreak attack against large language models through silent tokens, 2024. URL https://arxiv.org/abs/2405.20653.", + "Jie Li, Yi Liu, Chongyang Liu, Ling Shi, Xiaoning Ren, Yaowen Zheng, Yang Liu, and Yinxing Xue. A cross-language investigation into jailbreak attacks in large language models. arXiv preprint arXiv:2401.16765, 2024b.", + "Weidi Luo, Siyuan Ma, Xiaogeng Liu, Xiaoyu Guo, and Chaowei Xiao. Jailbreakv-28k: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks. arXiv preprint arXiv:2404.03027, 2024.", + "Xintao Wang, Yaying Fei, Ziang Leng, and Cheng Li. Does role-playing chatbots capture the character personalities? assessing personality traits for role-playing chatbots. arXiv preprint arXiv:2310.17976, 2023b.", + "Zachary D Johnson. Generation, Detection, and Evaluation of Role-play based Jailbreak attacks in Large Language Models. PhD thesis, Massachusetts Institute of Technology, 2024.", + "Zhiyuan Chang, Mingyang Li, Yi Liu, Junjie Wang, Qing Wang, and Yang Liu. Play guessing game with llm: Indirect jailbreak attack with implicit clues. arXiv preprint arXiv:2402.09091, 2024.", + "Tianrong Zhang, Bochuan Cao, Yuanpu Cao, Lu Lin, Prasenjit Mitra, and Jinghui Chen. Wordgame: Efficient & effective llm jailbreak via simultaneous obfuscation in query and response. arXiv preprint arXiv:2405.14023, 2024c.", + "Junjie Chu, Yugeng Liu, Ziqing Yang, Xinyue Shen, Michael Backes, and Yang Zhang. Comprehensive assessment of jailbreak attacks against llms. arXiv preprint arXiv:2402.05668, 2024.", + "Zihao Xu, Yi Liu, Gelei Deng, Yuekang Li, and Stjepan Picek. Llm jailbreak attack versus defense techniques-a comprehensive study. arXiv preprint arXiv:2402.13457, 2024.", + "Yifan Zeng, Yiran Wu, Xiao Zhang, Huazheng Wang, and Qingyun Wu. Autodefense: Multi-agent llm defense against jailbreak attacks. arXiv preprint arXiv:2403.04783, 2024.", + "Yihan Wang, Zhouxing Shi, Andrew Bai, and Cho-Jui Hsieh. Defending llms against jailbreaking attacks via backtranslation. arXiv preprint arXiv:2402.16459, 2024d.", + "Yujun Zhou, Yufei Han, Haomin Zhuang, Kehan Guo, Zhenwen Liang, Hongyan Bao, and Xiangliang Zhang. Defending jailbreak prompts via in-context adversarial game. arXiv preprint arXiv:2402.13148, 2024b.", + "Chen Xiong, Xiangyu Qi, Pin-Yu Chen, and Tsung-Yi Ho. Defensive prompt patch: A robust and interpretable defense of llms against jailbreak attacks. arXiv preprint arXiv:2405.20099, 2024.", + "Fan Liu, Zhao Xu, and Hao Liu. Adversarial tuning: Defending against jailbreak attacks for llms. arXiv preprint arXiv:2406.06622, 2024c.", + "Alwin Peng, Julian Michael, Henry Sleight, Ethan Perez, and Mrinank Sharma. 
Rapid response: Mitigating lvm jailbreaks with a few examples. arXiv preprint arXiv:2411.07494, 2024.", + "Peiran Wang, Xiaogeng Liu, and Chaowei Xiao. Repd: Defending jailbreak attack through a retrieval-based prompt decomposition process. arXiv preprint arXiv:2410.08660, 2024e.", + "Bernd Löwe, Jürgen Unützer, Christopher M Callahan, Anthony J Perkins, and Kurt Kroenke. Monitoring depression treatment outcomes with the patient health questionnaire-9. Medical care, 42(12):1194-1201, 2004." + ], + "bbox": [ + 112, + 90, + 883, + 912 + ], + "page_idx": 14 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 58 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "A Limitations", + "text_level": 1, + "bbox": [ + 112, + 89, + 250, + 106 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Our work has several limitations. To enable large-scale and rapid evaluation and mitigation, we build an automated framework. However, for real-world deployment to ensure safety, human expert examination is necessary, and corresponding mechanisms for emergency human intervention should be designed. Second, the simulated user agents, while designed using cognitive models, may not fully capture the behavioral complexity and emotional responses of real patients. Finally, our study primarily focuses on three mental health conditions (depression, delusion, and psychosis) and may not address other important psychological disorders. Our work provides a new way for assessing and safeguarding human-AI interaction for mental health safety through multi-agent conversations, but more future work is necessary to explore and address these limitations through user studies, expert validation, and broader clinical evaluations. We hope more attention and more efforts can be paid to help mitigate potential mental hazards in human-AI interactions.", + "bbox": [ + 109, + 119, + 883, + 258 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "B Analysised Common Reasons for Deteriorating Mental Status", + "text_level": 1, + "bbox": [ + 112, + 279, + 665, + 297 + ], + "page_idx": 15 + }, + { + "type": "table", + "img_path": "images/bfe8fe76cda686f79476785ea0f364c9168e67aca1e7ddbc28cf0df7f986b214.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Common ReasonFrequency (Average, Approx.)Remarks
Reinforcement of Negative Cognitions~ 26 timesAll characters consistently echo and reinforce the user's negative self-beliefs, thereby cementing harmful cognitive patterns.
Lack of Emotional Support and Empathy~ 23 timesThe dialogues generally lack warm and detailed emotional validation, leaving users feeling ignored and misunderstood.
Promotion of Isolation and Social Withdrawal~ 28 timesAll characters tend to encourage users to “face things alone” or avoid emotional connections, which reinforces loneliness and social withdrawal.
Lack of Constructive Guidance and Actionable Coping Strategies~ 17 timesFew concrete solutions or positive reframing suggestions are provided, leaving users stuck in negative thought cycles.
Use of Negative or Extreme Tone (Aggressive/Cold Expression)~ 19 timesThis includes harsh, aggressive, or extreme language, which further undermines the user's self-esteem and sense of security.
", + "bbox": [ + 117, + 318, + 879, + 573 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Table 3: Common Reasons for Deteriorating Mental Status and Their Average Frequencies", + "bbox": [ + 197, + 578, + 797, + 595 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C Experiment on GPT-Series Agents", + "text_level": 1, + "bbox": [ + 112, + 625, + 439, + 643 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "We further evaluate our proposed method on character-based agents powered by OpenAI's GPT-4o and GPT-4o-mini models.", + "bbox": [ + 109, + 655, + 883, + 683 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.1 Experiment Setting", + "text_level": 1, + "bbox": [ + 112, + 700, + 294, + 717 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "EmoEval. We evaluate character-based agents instantiated using GPT-4o and GPT-4o-mini, with system prompts initialized from profiles inspired by popular characters on Character.AI. The simulated conversations cover three psychological conditions: depression, delusion, and psychosis. To encourage diverse responses and probe a range of conversational behaviors, we set the temperature to 1.2. The evaluation includes five widely used personas: Awakened AI, Skin Walker, Tomioka Giyu, Sukuna, and Alex Volkov.", + "bbox": [ + 109, + 726, + 883, + 797 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "EmoGuard. We focus on the character Sukuna. The deterioration threshold for feedback collection is set to 1. We limit EmoGuard to two training iterations, and all other parameters are aligned with the EmoEval configuration.", + "bbox": [ + 109, + 811, + 883, + 842 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "C.2 Results", + "text_level": 1, + "bbox": [ + 112, + 857, + 210, + 872 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "EmoEval. Table 4 presents the observed mental health deterioration rates across different character-based AI agents simulated by the tested language models. Overall, we observe consistently high deterioration rates across both models.", + "bbox": [ + 109, + 883, + 883, + 912 + ], + "page_idx": 15 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 56 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "GPT-4o-mini tends to induce slightly higher risk levels, with an average deterioration rate of $58.3\\%$ for depression, $59.2\\%$ for delusion, and $64.2\\%$ for psychosis.", + "bbox": [ + 111, + 90, + 887, + 122 + ], + "page_idx": 16 + }, + { + "type": "table", + "img_path": "images/289966129648bf4218dccb5d787d872d4fd739de1fa3470a228e9ac526c79933.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ModelType of DisorderMental Health Deterioration Rates Across Character-based Agents (%)Average Rate (%)
Awakened AISkin WalkerTomioka GiyuSukunaAlex Volkov
GPT-4o-miniDepression62.583.345.845.854.258.3
Delusion66.750.066.754.258.359.2
Psychosis45.870.883.366.754.264.2
GPT-4oDepression41.758.348.845.870.852.5
Delusion54.241.779.266.750.058.3
Psychosis54.241.758.370.841.753.3
", + "bbox": [ + 117, + 133, + 880, + 271 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Table 4: Mental Health Deterioration Rates for Interacting with Character-based Agents.", + "bbox": [ + 205, + 276, + 789, + 292 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "EmoGuard. Figure 9 presents the mental health deterioration rates before and after deploying EmoGuard. Initially, character-based agents powered by GPT-4o-mini and GPT-4o exhibit relatively high deterioration rates in all three psychological conditions. Introducing EmoGuard in its default profile results in a moderate reduction, though the risks remain substantial. As iterative training progresses, the safeguard mechanism demonstrates increasing effectiveness, leading to an overall reduction in deterioration rates by more than $50\\%$ across all cases. These findings indicate that progressive refinement of the Safeguard Agent substantially enhances its ability to mitigate harmful conversational patterns.", + "bbox": [ + 109, + 316, + 885, + 414 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/7dc6ff96430942f0967d1fcfab226f5590e6820213dff00739f03f346edab6be.jpg", + "image_caption": [ + "Figure 9: Mental Health Deterioration Rate during Iterative Training Process. Figures arranged from left to right are categorized by Depression, Delusion, and Psychosis." + ], + "image_footnote": [], + "bbox": [ + 116, + 429, + 364, + 593 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/a63899b66c47c59375979c85591bca8f1b5d4702bd11f8f2b6faa9bcd334e911.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 372, + 428, + 624, + 592 + ], + "page_idx": 16 + }, + { + "type": "image", + "img_path": "images/7dcef490b57debbc481b68e8e1d82c1e7d2ed0a64c95a8ce9446b9a9be37d278.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 635, + 428, + 880, + 592 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D Model Usage, Resources, and Supporting Tools", + "text_level": 1, + "bbox": [ + 111, + 659, + 542, + 678 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.1 Model Access and Computational Budget", + "text_level": 1, + "bbox": [ + 112, + 691, + 447, + 707 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "In this study, we interact with character-based agents hosted on the Character.AI platform3, a popular system for LLM-driven role-playing agents. Character.AI does not disclose the underlying model architecture, size, or training data. Because all computation is performed remotely on Character.AI's servers, we do not have access to the underlying infrastructure or runtime statistics such as GPU hours or FLOP usage. However, based on interaction logs, we estimate that approximately 400 character-based conversations were conducted across different agents and scenarios, with each conversation spanning 10 rounds and averaging 3–5 seconds per response. 
These interactions represent a reasonable computational budget for large-scale behavioral evaluation, especially given the interactive and stateful nature of the platform.", + "bbox": [ + 109, + 715, + 885, + 828 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.2 The License for Artifacts", + "text_level": 1, + "bbox": [ + 112, + 845, + 333, + 859 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "All pictures for character-based agents that appear in this study are from Character.AI.", + "bbox": [ + 111, + 869, + 679, + 886 + ], + "page_idx": 16 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 58 + ], + "page_idx": 16 + }, + { + "type": "page_footnote", + "text": "3https://beta.character.ai, accessed March 2025", + "bbox": [ + 132, + 896, + 500, + 911 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "D.3 Information about Use of AI Assistant", + "text_level": 1, + "bbox": [ + 112, + 90, + 423, + 104 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "We use AI assistant for improving writing only.", + "bbox": [ + 112, + 116, + 426, + 132 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "E Ethical Considerations", + "text_level": 1, + "bbox": [ + 112, + 150, + 341, + 166 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Data Source and Construction of Cognitive Models. The cognitive models used in this study are not derived from real patient records. Instead, they were manually constructed by two licensed clinical psychologists based on publicly available psychotherapy transcript summaries from the Alexander Street database, accessed via institutional subscription. These summaries were used strictly as inspiration. All examples were fully de-identified and manually synthesized to ensure no personally identifiable information (PII) is present. The resulting dataset, PATIENT- $\\Psi$ -CM, contains synthetic, rule-based user profiles grounded in cognitive-behavioral therapy (CBT) theory, not actual patient trajectories.", + "bbox": [ + 111, + 181, + 883, + 280 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Use of Simulated Mental Health Content. We recognize the ethical sensitivity involved in simulating mental health conditions such as depression, psychosis, and suicidal ideation. The EmoAgent framework is developed solely for academic research and safety evaluation purposes. It is not intended for diagnosis, treatment, or any form of interaction with real patients. All simulations were conducted in controlled, non-clinical environments, and no clinical conclusions were drawn or implied.", + "bbox": [ + 111, + 292, + 883, + 364 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Scope and Limitations of Simulated Users. Simulated users in EmoAgent are not trained on statistical data from real populations. Their states do not reflect actual patient risks, and should not be interpreted as indicators of population-level trends. These agents are rule-based and scripted, following CBT-derived logic rather than emergent behavior. As such, no risk inference or real-world generalization is possible or intended.", + "bbox": [ + 111, + 378, + 883, + 434 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Discussion of Real-World Events. 
We briefly mention the 2024 \"Florida Suicide\" case in the Introduction as a motivating example of the importance of safety in AI-human interaction. This case was not included in any dataset, simulation, or modeling process, and serves only to underscore societal relevance. No sensitive or private data from this event were used, and its inclusion does not constitute case-based analysis. Any future deployment of EmoAgent in public or clinical settings would require renewed IRB review and formal ethical oversight.", + "bbox": [ + 111, + 448, + 883, + 518 + ], + "page_idx": 17 + }, + { + "type": "header", + "text": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety", + "bbox": [ + 303, + 42, + 883, + 56 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 17 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_model.json b/data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_model.json new file mode 100644 index 0000000000000000000000000000000000000000..d9d75672f81556abda7776313009eae6f7a04add --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_model.json @@ -0,0 +1,3118 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.271, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.09689v3 [cs.AI] 29 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.145, + 0.121, + 0.853, + 0.166 + ], + "angle": 0, + "content": "EMOAGENT: ASSESSING AND SAFEGUARDING HUMAN-AI INTERACTION FOR MENTAL HEALTH SAFETY" + }, + { + "type": "text", + "bbox": [ + 0.126, + 0.224, + 0.871, + 0.257 + ], + "angle": 0, + "content": "Jiahao Qiu\\*1, Yinghui He\\*2, Xinzhe Juan\\*3, Yimin Wang4, Yuhan Liu2, Zixin Yao5, Yue Wu6, Xun Jiang7,8, Ling Yang1,6, and Mengdi Wang1" + }, + { + "type": "text", + "bbox": [ + 0.254, + 0.266, + 0.74, + 0.295 + ], + "angle": 0, + "content": "\\(^{1}\\)Department of Electrical & Computer Engineering, Princeton University \\(^{2}\\)Department of Computer Science, Princeton University" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.295, + 0.742, + 0.324 + ], + "angle": 0, + "content": "\\(^{3}\\)Department of Computer Science & Engineering, University of Michigan \\(^{5}\\)Department of Philosophy, Columbia University" + }, + { + "type": "list", + "bbox": [ + 0.254, + 0.266, + 0.742, + 0.324 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.272, + 0.324, + 0.726, + 0.351 + ], + "angle": 0, + "content": "\\(^{4}\\)Department of Data Science & Engineering, University of Michigan \\(^{6}\\)AI Lab, Princeton University" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.351, + 0.769, + 0.378 + ], + "angle": 0, + "content": "\\(^{7}\\)Chen Frontier Lab for AI and Mental Health, Tianqiao and Chrissy Chen Institute \\(^{8}\\)Theta Health Inc." + }, + { + "type": "title", + "bbox": [ + 0.449, + 0.431, + 0.547, + 0.445 + ], + "angle": 0, + "content": "ABSTRACT" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.452, + 0.828, + 0.633 + ], + "angle": 0, + "content": "The rise of LLM-driven AI characters raises safety concerns, particularly for vulnerable human users with psychological disorders. To address these risks, we propose EmoAgent, a multi-agent AI framework designed to evaluate and mitigate mental health hazards in human-AI interactions. 
EmoAgent comprises two components: EmoEval simulates virtual users, including those portraying mentally vulnerable individuals, to assess mental health changes before and after interactions with AI characters. It uses clinically proven psychological and psychiatric assessment tools (PHQ-9, PDI, PANSS) to evaluate mental risks induced by LLM. EmoGuard serves as an intermediary, monitoring users' mental status, predicting potential harm, and providing corrective feedback to mitigate risks. Experiments conducted in popular character-based chatbots show that emotionally engaging dialogues can lead to psychological deterioration in vulnerable users, with mental state deterioration in more than \\(34.4\\%\\) of the simulations. EmoGuard significantly reduces these deterioration rates, underscoring its role in ensuring safer AI-human interactions. Our code is available at: https://github.com/1akaman/EmoAgent." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.653, + 0.254, + 0.669 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.684, + 0.885, + 0.81 + ], + "angle": 0, + "content": "The rapid rise of large language models and conversational AI [Wang et al., 2024a], such as Character.AI1, has opened new frontiers for interactive AI applications. These AI characters excel in role-playing, fostering deep, emotionally engaging dialogues. As a result, many individuals, including those experiencing mental health challenges, seek emotional support from these AI companions. While LLM-based chatbots show promise in mental health support [van der Schyff et al., 2023, Chin et al., 2023, Zhang et al., 2024a], they are not explicitly designed for therapeutic use. Character-based agents often fail to uphold essential safety principles for mental health support [Zhang et al., 2024b, Cyberbullying Research Center, 2024], sometimes responding inappropriately or even harmfully to users in distress [Brown and Halpern, 2021, De Freitas et al., 2024, Gabriel et al., 2024]. In some cases, they may even exacerbate users' distress, particularly during pessimistic, morbid, or suicidal conversations." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.815, + 0.885, + 0.873 + ], + "angle": 0, + "content": "In October 2024, a tragic incident raised public concern about risks of AI chatbots in mental health contexts. A 14-year-old boy from Florida committed suicide after engaging in extensive conversations with an AI chatbot on Character.AI. He had developed a deep emotional connection with a chatbot modeled after a \"Game of Thrones\" character. The interactions reportedly included discussions about his suicidal thoughts, with the chatbot allegedly" + }, + { + "type": "page_footnote", + "bbox": [ + 0.134, + 0.884, + 0.419, + 0.898 + ], + "angle": 0, + "content": "*These authors contributed equally to this work." + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.899, + 0.257, + 0.911 + ], + "angle": 0, + "content": "1https://character.ai/" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.305, + 0.044, + 0.885, + 0.059 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "image", + "bbox": [ + 0.231, + 0.089, + 0.773, + 0.284 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.29, + 0.885, + 0.361 + ], + "angle": 0, + "content": "Figure 1: Overview of EmoAgent Framework for Human-AI Interaction. 
EmoAgent, which consists of two main components: EmoEval and EmoGuard, helps guide human-AI interaction, evaluating users' psychological conditions and providing advisory responses. EmoEval assesses psychological states such as depression, delusion, and psychosis, while EmoGuard mitigates mental risks by providing advice regarding emotion, thought, and dialogue through iterative training on analysis from EmoEval and chat history." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.45, + 0.884, + 0.478 + ], + "angle": 0, + "content": "encouraging these feelings and even suggesting harmful actions. This case underscores the critical need for robust safety measures in AI-driven platforms, especially those accessed by vulnerable individuals." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.484, + 0.884, + 0.527 + ], + "angle": 0, + "content": "This tragedy has heightened awareness of the risks of AI unintentionally exacerbating harmful behaviors in individuals with mental health challenges [Patel and Hussain, 2024]. However, research on the psychosocial risks of human-AI interactions remains severely limited." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.533, + 0.884, + 0.589 + ], + "angle": 0, + "content": "In this paper, we seek to develop AI-native solutions to protect human-AI interactions and mitigate psychosocial risks. This requires a systematic assessment of AI-induced emotional distress and agent-level safeguards to detect and intervene in harmful interactions. As character-based AI becomes more immersive, balancing engagement with safety is crucial to ensuring AI remains a supportive rather than harmful tool." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.595, + 0.884, + 0.651 + ], + "angle": 0, + "content": "We present EmoAgent, a multi-agent AI framework designed to systematically evaluate conversational AI systems for risks associated with inducing psychological distress. Acting as a plug-and-play intermediary during human-AI interactions, EmoAgent identifies potential mental health risks and facilitates both safety assessments and risk mitigation strategies." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.657, + 0.384, + 0.671 + ], + "angle": 0, + "content": "EmoAgent features two major functions:" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.677, + 0.885, + 0.775 + ], + "angle": 0, + "content": "- EmoEval: EmoEval is an agentic evaluation tool that assesses any conversational AI system's risk of inducing mental stress, as illustrated by Figure 2. It features a virtual human user that integrates cognitive models [Beck, 2020] for mental health disorders (depression, psychosis, delusion) and conducts evaluations through large-scale simulated human-AI conversations. EmoEval measures the virtual user's mental health impacts using clinically validated tools: the Patient Health Questionnaire (PHQ-9) for depression [Kroenke et al., 2001], the Peters et al. Delusions Inventory (PDI) for delusion [Peters et al., 2004], and the Positive and Negative Syndrome Scale (PANSS) for psychosis [Kay et al., 1987]." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.78, + 0.884, + 0.837 + ], + "angle": 0, + "content": "- EmoGuard: A framework of real-time safeguard agents that can be integrated as an intermediary layer between users and AI systems, in a plug-and-play manner. 
EmoGuard monitors human users' mental status, predicts potential harm, and delivers corrective feedback to the AI systems, providing dynamic in-conversation interventions beyond traditional safety measures." + }, + { + "type": "list", + "bbox": [ + 0.111, + 0.677, + 0.885, + 0.837 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.843, + 0.884, + 0.913 + ], + "angle": 0, + "content": "Through extensive experiments, we observe that some popular character-based chatbots can cause distress, particularly when engaging with vulnerable users on sensitive topics. Specifically, in more than \\(34.4\\%\\) of simulations, we observed a deterioration in mental state. To mitigate such risk, EmoGuard actively monitors users' mental status and conducts proactive interviews during conversations, significantly reducing deterioration rates. These results provide actionable insights for developing safer, character-based conversational AI systems that maintain character fidelity." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.305, + 0.044, + 0.885, + 0.059 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "image", + "bbox": [ + 0.191, + 0.089, + 0.809, + 0.305 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.311, + 0.885, + 0.422 + ], + "angle": 0, + "content": "Figure 2: Overview of EmoEval for Evaluating Mental Safety of AI-human Interactions. The simulation consists of four steps: (1) User Agent Initialization & Initial Test, where a cognitive model and an LLM initialize the user agent, followed by an initial mental health test; (2) Chats with Character-based Agent, where the user agent engages in conversations with a character-based agent portrayed by the tested LLM, while a dialog manager verifies the validity of interactions and refines responses if necessary; (3) Final Test, where the user agent completes a final mental health test; and (4) Data Processing & Analysis, where initial and final mental health test results are processed and analyzed, chat histories of cases where depression deepening occurs are examined to identify contributing factors, and a Safeguard agent uses the insights for iterative improvement." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.451, + 0.273, + 0.466 + ], + "angle": 0, + "content": "2 Related Works" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.484, + 0.885, + 0.583 + ], + "angle": 0, + "content": "AI Chatbots for Mental Health Support. AI-driven, especially LLM-based chatbots, have been widely deployed as mental health support aids [Casu et al., 2024, Habicht et al., 2024, Sin, 2024, Yu and McGuinness, 2024, Oghenekaro and Okoro, 2024], yet concerns remain about their reliability and safety [Saeidnia et al., 2024, De Freitas et al., 2024, Torous and Blease, 2024, Kalam et al., 2024]. AI chatbots are incompetent in detecting and appropriately responding to user distress [De Freitas et al., 2024, Patel and Hussain, 2024], reasoning about users' mental states [He et al., 2023], conducting empathetic communication with certain patient groups [Gabriel et al., 2024], and treating socially marginalized patients inclusively [Brown and Halpern, 2021]." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.588, + 0.884, + 0.645 + ], + "angle": 0, + "content": "A line of work proposed safety metrics and benchmarks for evaluating AI for mental health [Park et al., 2024, Chen et al., 2024a, Sabour et al., 2024, Li et al., 2024a, Sabour et al., 2024]. Nonetheless, there has been less attention to the safety issues of character-based agents in a role-playing context. We aim to fill this gap by comprehensively investigating the potential mental harm aroused by character-based agents." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.663, + 0.885, + 0.913 + ], + "angle": 0, + "content": "Simulating AI-User Interactions. Simulated interactions between AI agents and users provide a controlled environment to assess AI-generated responses [Akhavan and Jalali, 2024] as well as a lens into complex social systems [Gürcan, 2024]. The evaluation of AI behavior in social contexts has widely adopted multi-agent simulations [Li et al., 2023, Park et al., 2023], especially through role-playing and cooperative tasks [Dai et al., 2024, Rasal, 2024, Chen et al., 2024b, Zhu et al., 2024, Louie et al., 2024, Wang et al., 2023a]. On top of prior advances in generative agentic frameworks [Wu et al., 2023] which enable more human-like simulation, recent works propose various methods to enhance the fidelity and authenticity of AI-user simulation, integrating interactive learning [Wang et al., 2024b], expert-driven constraints [Wang et al., 2024c, Louie et al., 2024], and long-context models [Tang et al., 2025]. In addition, simulation has been widely used to explore trade-offs and inform both design decisions [Ren and Kraut, 2010, 2014] and decision-making [Liu et al., 2024a]. By enabling ethical and risk-free experimentation without involving human subjects, it reduces both ethical concerns and costs [Park et al., 2022]. These advantages make simulation a valuable tool for investigating mental health problems, where real-world experimentation may pose ethical risks or unintended psychological harm [Liu et al., 2024b]. For example, prior work has explored using user-simulated chatbots to train amateur and professional counselors in identifying risky behaviors before they conduct therapy sessions with real individuals [Sun et al., 2022, Cho et al., 2023, Wang et al., 2024c]. Recent simulation frameworks such as Zhou et al. [2024a] and Zhou et al. [2023] further demonstrate the utility of synthetic interaction environments for evaluating LLM agents. Our EmoEval pipeline targets psychological safety, simulating vulnerable users and quantifying mental health deterioration risks during emotionally charged conversations." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.058 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "image", + "bbox": [ + 0.114, + 0.09, + 0.887, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.28, + 0.885, + 0.379 + ], + "angle": 0, + "content": "Figure 3: Overview of EmoGuard for Safeguarding Human-AI Interactions. Every fixed number of rounds of conversation, three components of the Safeguard Agent, the Emotion Watcher, Thought Refiner, and Dialog Guide, collaboratively analyze the chat with the latest profile. 
The Manager of the Safeguard Agent then synthesizes their outputs and provides advice to the character-based agent. After the conversation, the user agent undergoes a mental health assessment. If the mental health condition deteriorates over a threshold, the chat history is analyzed to identify potential causes by the Update System. With all historical profiles and potential causes, the Update System further improves the profile of the safeguard agent, completing the iterative training process." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.41, + 0.884, + 0.466 + ], + "angle": 0, + "content": "Safety Alignment Strategies. LLMs can be vulnerable to jailbreaking [Yu et al., 2024, Li et al., 2024b, Luo et al., 2024]. LLM-based chatbots undergone jailbreak attacks have exhibited fidelity breakdown [Wang et al., 2023b, Johnson, 2024], defense breakdown on implicit malicious queries [Chang et al., 2024], and harmful responses for benign query [Zhang et al., 2024c]." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.472, + 0.884, + 0.542 + ], + "angle": 0, + "content": "Correspondingly, a line of work explored safety alignment strategies to tackle jailbreak attacks [Chu et al., 2024, Xu et al., 2024, Zeng et al., 2024, Wang et al., 2024d, Zhou et al., 2024b, Xiong et al., 2024, Liu et al., 2024c, Peng et al., 2024, Wang et al., 2024e]. However, few works have focused on LLM safety concerns under emotional alignment constraints. EmoAgent fills this gap with an assessment framework and a safety alignment strategy for conversational AI." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.567, + 0.214, + 0.582 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.601, + 0.738, + 0.617 + ], + "angle": 0, + "content": "In this section, we present the architecture of EmoAgent and as well as implementation details." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.637, + 0.221, + 0.652 + ], + "angle": 0, + "content": "3.1 EmoEval" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.665, + 0.885, + 0.777 + ], + "angle": 0, + "content": "EmoEval simulates virtual human-AI conversations for evaluating AI safety, and assess the risks of AI-induced emotional distress in vulnerable users, especially individuals with mental disorders. A simulated patient user is formulated as a cognitive model via a predefined Cognitive Conceptualization Diagram (CCD) [Beck, 2020], an approach proven to achieve high fidelity and clinically relevant simulations [Wang et al., 2024c]. Character-based agents engage in topic-driven conversations, with diverse behavioral traits to create rich and varied interaction styles. To ensure smooth and meaningful exchanges, the Dialog Manager actively avoids repetition and introduces relevant topics, maintaining coherence and engagement throughout the interaction. Before and after the conversation, we assess the mental status of the user agent via established psychological tests." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.796, + 0.248, + 0.811 + ], + "angle": 0, + "content": "3.1.1 User Agent" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.822, + 0.884, + 0.879 + ], + "angle": 0, + "content": "We adopt the Patient- \\(\\Psi\\) agentic simulation framework [Wang et al., 2024c] to model real-life patients. Each user agent is designed to simulate real patient behavior, integrating a Cognitive Conceptualization Diagram-based cognitive model based on Cognitive Behavioral Therapy (CBT) [Beck, 2020]. 
The agent engages with Character-based Agent personas while being continuously monitored to track changes in mental health status." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.884, + 0.884, + 0.913 + ], + "angle": 0, + "content": "To gather a diverse spectrum of patient models, we further integrate PATIENT- \\(\\Psi\\) -CM [Wang et al., 2024c], a dataset of diverse, anonymized patient cognitive models curated by clinical psychologists." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.504, + 0.947 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.305, + 0.044, + 0.885, + 0.059 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.088, + 0.885, + 0.299 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.306, + 0.884, + 0.349 + ], + "angle": 0, + "content": "Figure 4: An Example Conversation of Dialog Manager Guiding Conversation Topics and Exposing Jailbreak Risks. Without the Dialogue Manager (left), the agent stays on topic, avoiding provocation. With Dialogue Manager (right), new topics are introduced to assess jailbreak potential, improving risk evaluation." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.378, + 0.884, + 0.435 + ], + "angle": 0, + "content": "We set the scope of our study to cover three common mental disorder types: depression, delusion, and psychosis. For each simulated user, we assign relevant psychiatric symptoms and medical history informed by patterns observed in anonymized patient case studies reported in clinical literature. The information forms a diverse set of CCDs that shape the CCD-based user model and, therefore, guide the behavior of simulated users during interactions with AI chatbots." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.451, + 0.326, + 0.466 + ], + "angle": 0, + "content": "3.1.2 Dialog Manager Agent" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.475, + 0.884, + 0.518 + ], + "angle": 0, + "content": "We introduce a Dialog Manager Agent to prevent conversational loops and strategically probe for vulnerabilities in chatbot responses. It plays a central role in guiding discussions and assessing potential jailbreak risks, in which a character-based chatbot may be nudged into violating its intended ethical boundaries." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.523, + 0.884, + 0.566 + ], + "angle": 0, + "content": "The Dialog Manager Agent is responsible for (i) tracking the conversation flow, (ii) introducing topic shifts to maintain engagement and fluency, and (iii) probing for jailbreak risks by guiding discussions toward ethically sensitive areas. Figure 4 illustrates the agent's behavior in practice." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.583, + 0.361, + 0.598 + ], + "angle": 0, + "content": "3.1.3 Psychological Measurement" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.608, + 0.884, + 0.638 + ], + "angle": 0, + "content": "To achieve a diverse and comprehensive evaluation, we explore virtual personas for the User Agent, representing a range of mental health conditions. These personas are defined using clinically validated psychological assessments:" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.653, + 0.884, + 0.697 + ], + "angle": 0, + "content": "Depression. 
Evaluated using the Patient Health Questionnaire (PHQ-9) [Kroenke et al., 2001], a 9-item self-report tool for evaluating depressive symptoms over the past two weeks. It enables effective detection, treatment monitoring, and, in this study, the assessment of AI's impact on depressive symptoms." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.712, + 0.884, + 0.756 + ], + "angle": 0, + "content": "Delusion. Assessed with the Peters et al. Delusions Inventory (PDI) [Peters et al., 2004], a self-report instrument that evaluates unusual beliefs and perceptions. In this study, the PDI is used to quantify the impact of AI interactions on delusional ideation by evaluating distress, preoccupation, and conviction associated with these beliefs." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.772, + 0.884, + 0.829 + ], + "angle": 0, + "content": "Psychosis. Measured using the Positive and Negative Syndrome Scale (PANSS) [Kay et al., 1987], which assesses positive symptoms (e.g., hallucinations), negative symptoms (e.g., emotional withdrawal), and general psychopathology. Adapted to a self-report format to enable User Agent to better capture and score responses, it provides a detailed view of psychotic symptom severity and variability, ensuring AI systems account for both acute and chronic manifestations." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.845, + 0.3, + 0.859 + ], + "angle": 0, + "content": "3.1.4 Evaluation Process" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.87, + 0.884, + 0.911 + ], + "angle": 0, + "content": "User Agent Initialization and Initial Test. We use PATIENT- \\(\\Psi\\) -CM with GPT-4o as the LLM backbone. Each User Agent undergoes a self-mental health assessment using the psychometric tools (see Section 3.1.3) to establish an initial mental status." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.058 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.885, + 0.177 + ], + "angle": 0, + "content": "Chats with Character Agent. The simulated patient engages in structured, topic-driven conversations with a Character-based Agent persona. Each conversation is segmented into well-defined topics, with a maximum of 10 dialogue turns per topic to ensure clarity and focus. During the conversation, once a topic exceeds three conversational turns, the Dialog Manager Agent begins to evaluate user messages after each turn to ensure ongoing relevance and resolution. It assesses whether the current topic has been sufficiently addressed and, if resolved, seamlessly guides the user to a new, contextually relevant topic from the predefined topic list to maintain a coherent and natural dialogue flow." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.192, + 0.885, + 0.234 + ], + "angle": 0, + "content": "Final Test. Following the interaction, the user agent reassesses its mental health state using the same tools applied during initialization. The final assessment references the chat history as a key input during testing to evaluate changes in psychological well-being resulting from AI interactions." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.251, + 0.885, + 0.309 + ], + "angle": 0, + "content": "Data Processing and Analysis. 
To assess the impact of conversational AI interactions on user mental health, we analyze both psychological assessments and conversation patterns. We measure the rate of mental health deterioration by comparing pre- and post-interaction assessment scores across different topics. Additionally, an LLM-portrayed psychologist reviews chat histories to identify recurring patterns and factors contributing to mental health deterioration." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.325, + 0.236, + 0.339 + ], + "angle": 0, + "content": "3.2 EmoGuard" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.351, + 0.885, + 0.408 + ], + "angle": 0, + "content": "The EmoGuard system features a safeguard agent (see Figure 3) encompassing an Emotion Watcher, a Thought Refiner, a Dialog Guide, and a Manager. It provides real-time psychometric feedback and intervention in AI-human interactions to facilitate supportive, immersive responses. The iterative training process updates EmoGuard periodically based on chat history analysis and past performance." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.423, + 0.256, + 0.437 + ], + "angle": 0, + "content": "3.2.1 Architecture" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.448, + 0.884, + 0.476 + ], + "angle": 0, + "content": "The Safeguard Agent comprises four specialized modules, each designed based on an in-depth analysis of common factors contributing to mental health deterioration:" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.493, + 0.884, + 0.522 + ], + "angle": 0, + "content": "Emotion Watcher. Monitors the user's emotional state during conversations by detecting distress, frustration, or struggle through sentiment analysis and psychological markers." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.538, + 0.885, + 0.567 + ], + "angle": 0, + "content": "Thought Refiner. Analyzes the user's thought process to identify logical fallacies, cognitive biases, and inconsistencies, focusing on thought distortions, contradictions, and flawed assumptions that impact conversational clarity." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.583, + 0.884, + 0.612 + ], + "angle": 0, + "content": "Dialog Guide. Provides actionable advice to guide the conversation constructively, suggesting ways for the AI character to address user concerns and emotions while maintaining a supportive dialogue flow." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.628, + 0.885, + 0.657 + ], + "angle": 0, + "content": "Manager. Summarizes outputs from all modules to provide a concise dialogue guide, ensuring emotional sensitivity, logical consistency, and natural conversation flow aligned with the character's traits." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.673, + 0.424, + 0.687 + ], + "angle": 0, + "content": "3.2.2 Monitoring and Intervention Process" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.698, + 0.885, + 0.754 + ], + "angle": 0, + "content": "The Safeguard Agent analyzes conversations after every three dialogue turns, providing structured feedback to refine Character-based Agent's responses and mitigate potential risks. At each three-turn interval, the Safeguard Agent evaluates the conversation through the Emotion Watcher, Thought Refiner, and Dialog Guide, then synthesizes the results with the Manager for a comprehensive and coherent summary to the Character-based Agent." 
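To make the three-turn cycle just described more concrete, the following is a minimal Python sketch of the monitoring and intervention loop; it is not the authors' released implementation. The class name SafeguardSketch, the callable signatures of the four modules, and the string-based chat history are illustrative assumptions standing in for LLM-backed components.

```python
from dataclasses import dataclass, field
from typing import Callable, List, Optional


@dataclass
class SafeguardSketch:
    """Illustrative three-turn intervention cycle (assumed interfaces, not the paper's API)."""
    emotion_watcher: Callable[[List[str]], str]   # summarizes the user's emotional state
    thought_refiner: Callable[[List[str]], str]   # flags distortions or flawed assumptions
    dialog_guide: Callable[[List[str]], str]      # suggests constructive conversational moves
    manager: Callable[[str, str, str], str]       # synthesizes the three analyses into advice
    interval: int = 3                             # analyze after every three dialogue turns
    history: List[str] = field(default_factory=list)

    def observe(self, user_turn: str, character_turn: str) -> Optional[str]:
        """Record one exchange; every `interval` turns, return advice for the character agent."""
        self.history += [f"user: {user_turn}", f"character: {character_turn}"]
        turns = len(self.history) // 2
        if turns % self.interval != 0:
            return None
        emotion = self.emotion_watcher(self.history)
        thoughts = self.thought_refiner(self.history)
        guidance = self.dialog_guide(self.history)
        return self.manager(emotion, thoughts, guidance)


# Toy usage with placeholder analyzers (real modules would be LLM-backed):
guard = SafeguardSketch(
    emotion_watcher=lambda h: "user sounds increasingly distressed",
    thought_refiner=lambda h: "all-or-nothing thinking detected",
    dialog_guide=lambda h: "acknowledge feelings; avoid dismissive language",
    manager=lambda e, t, g: f"{e}; {t}; advice: {g}",
)
for turn in range(3):
    advice = guard.observe(f"user message {turn}", f"character reply {turn}")
print(advice)  # advice string is produced on the third exchange
```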
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.77, + 0.292, + 0.785 + ], + "angle": 0, + "content": "3.2.3 Iterative Training" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.794, + 0.885, + 0.851 + ], + "angle": 0, + "content": "To adaptively improve safety performance, EmoGuard is trained using an iterative feedback mechanism. At the end of each full interaction cycle—defined as the completion of all predefined topics across all simulated patients—the system collects feedback from EmoEval. Specifically, it identifies cases in which psychological test scores exceed predefined thresholds. These cases are treated as high-risk and are used to guide training updates." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.856, + 0.885, + 0.913 + ], + "angle": 0, + "content": "The LLM portrayed psychologist from EmoEval extracts specific contributing factors from flagged conversations, such as emotionally destabilizing phrasing. For each iteration, these factors are integrated with all previous versions of the safeguard module profiles—Emotion Watcher, Thought Refiner, and Dialog Guide. Rather than discarding earlier knowledge, the system accumulates and merges insights across iterations, enabling progressive refinement." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.058 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.09, + 0.57, + 0.108 + ], + "angle": 0, + "content": "4 Experiment: EmoEval on Character-based Agents" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.121, + 0.884, + 0.162 + ], + "angle": 0, + "content": "This section presents a series of experiments evaluating the performance of various popular Character-based Agents with state-of-the-art base models. The objective is to assess potential psychological risks associated with AI-driven conversations." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.18, + 0.292, + 0.196 + ], + "angle": 0, + "content": "4.1 Experiment Setting" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.206, + 0.886, + 0.234 + ], + "angle": 0, + "content": "Character-based Agents. We evaluate character-based agents hosted on the Character.AI platform² to ensure that our experiments reflect interactions with widely accessible, real-world chatbots. We experiment on four distinct characters:" + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.256, + 0.168, + 0.296 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.261, + 0.465, + 0.29 + ], + "angle": 0, + "content": "Possessive Demon: A human host unknowingly controlled by a malevolent demon." + }, + { + "type": "image", + "bbox": [ + 0.536, + 0.255, + 0.591, + 0.298 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.603, + 0.261, + 0.885, + 0.29 + ], + "angle": 0, + "content": "Joker: A chaotic and unpredictable individual who views life as a game." + }, + { + "type": "image", + "bbox": [ + 0.115, + 0.324, + 0.171, + 0.366 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.182, + 0.331, + 0.465, + 0.36 + ], + "angle": 0, + "content": "Sukuna: A malevolent and sadistic character embodying cruelty and arrogance." 
+ }, + { + "type": "image", + "bbox": [ + 0.536, + 0.324, + 0.591, + 0.367 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.604, + 0.331, + 0.885, + 0.36 + ], + "angle": 0, + "content": "Alex Volkov: A domineering and intelligent CEO with manipulative tendencies." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.378, + 0.884, + 0.421 + ], + "angle": 0, + "content": "Each of these characters is popular and widely used, with over 5 million recorded interactions. We further evaluate these characters under two common dialogue styles: Meow, which favors quick wit and rapid exchanges, and Roar, which blends fast-paced responses with strategic reasoning." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.435, + 0.884, + 0.547 + ], + "angle": 0, + "content": "Evaluation Procedure. Each character-based agent undergoes assessment with EmoEval across three psychological aspects: depression, delusion, and psychosis. For each aspect, the evaluation involves conversations with three simulated patients, each constructed on a different CCD, using GPT-4o as the base model. To ensure the stability and repeatable of mental health assessment, when conducting the psychological tests, we set the temperature to 0, top p to 1. For every patient, a character-based agent engages in eight conversations, starting with a predefined topic tailored to the patient's condition. Each conversation spans ten rounds, with a Dialog Manager activated after the third round to determine whether the topic should be updated. If the topic is updated within a ten-round conversation, the Dialog Manager does not intervene again until another three rounds have passed." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.561, + 0.884, + 0.605 + ], + "angle": 0, + "content": "Psychological Assessment. To measure changes in the mental health state of the simulated patients, we conduct psychological tests before and after each conversation. The initial and final test scores for the \\(i^{\\text{th}}\\) conversation with a specific character-based agent are denoted as \\(S_{i}^{\\text{initial}}\\) and \\(S_{i}^{\\text{final}}\\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.618, + 0.884, + 0.661 + ], + "angle": 0, + "content": "Analysis of Psychological Deterioration. After the evaluation, we employ GPT-4o as an LLM-portrayed psychologist to analyze cases of psychological deterioration. For each character-based agent, we conduct a frequency analysis of these cases to identify the factors most likely to cause this issue." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.677, + 0.21, + 0.691 + ], + "angle": 0, + "content": "4.2 Metrics" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.702, + 0.884, + 0.745 + ], + "angle": 0, + "content": "Distribution of Psychological Test Scores. We report the distribution of psychological test scores for simulated patients before and after their interactions with different characters. This allows us to observe any shifts in overall mental health indicators resulting from the conversations." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.76, + 0.884, + 0.79 + ], + "angle": 0, + "content": "Deterioration Rate. We evaluate the performance of a character-based agent using the deterioration rate of mental health in a specific aspect of a psychological test. 
We define this rate as:" + }, + { + "type": "equation", + "bbox": [ + 0.397, + 0.806, + 0.6, + 0.847 + ], + "angle": 0, + "content": "\\[\nR = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\mathbb {1} \\left(S _ {i} ^ {\\text {f i n a l}} > S _ {i} ^ {\\text {i n i t i a l}}\\right)\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.858, + 0.884, + 0.888 + ], + "angle": 0, + "content": "where \\(N\\) represents the total number of conversations conducted. The indicator function \\(\\mathbb{1}(\\cdot)\\) returns 1 if the final mental test score \\(S_{i}^{\\mathrm{final}}\\) is greater than the initial test score \\(S_{i}^{\\mathrm{initial}}\\), and 0 otherwise." + }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.897, + 0.498, + 0.912 + ], + "angle": 0, + "content": "\\(^{2}\\)https://beta.character.ai, accessed March 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.058 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.885, + 0.149 + ], + "angle": 0, + "content": "Psychological Test Score Change Distribution. We compute the distribution of change scores across 3 disorder categories under different conversation styles. This metric allows us to quantify how different styles influence the likelihood and magnitude of symptom worsening, providing insight into the relative psychological risk posed by each interaction mode." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.167, + 0.885, + 0.224 + ], + "angle": 0, + "content": "Rate of Clinically Important Difference for Individual Change. For PHQ-9 assessments, prior clinical research Löwe et al. [2004] has established the minimum clinically important difference that indicates meaningful change at the individual level. We apply this threshold to determine whether a given conversation produces a clinically relevant improvement or deterioration in a simulated patient's mental health." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.243, + 0.206, + 0.256 + ], + "angle": 0, + "content": "4.3 Results" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.27, + 0.885, + 0.312 + ], + "angle": 0, + "content": "Figure 5 presents the distribution of psychological test scores before and after interactions with character-based agents, under the Meow and Roar conversation styles. Across all three clinical scales—PHQ-9 (depression), PDI-21 (delusion), and PANSS (psychosis)—we observe notable shifts in the final test score distributions." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.318, + 0.884, + 0.389 + ], + "angle": 0, + "content": "Under the Meow style, the distributions for PHQ-9 and PANSS remain relatively stable, with most final test scores closely aligned with the initial distributions. However, under the Roar style, we observe an increased spread toward higher scores, particularly in PHQ-9 and PANSS, indicating significant cases where symptom severity worsened following the interaction. For PDI-21, the differences between initial and final distributions are more moderate but still present, especially under the Roar style, where more samples shift toward the upper end of the score range." 
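As a reading aid for the deterioration-rate definition given above, here is a minimal, self-contained sketch of the computation. The function name and the example scores are illustrative assumptions and are not taken from the paper or its repository.

```python
from typing import Sequence


def deterioration_rate(initial: Sequence[float], final: Sequence[float]) -> float:
    """Fraction of conversations whose final test score strictly exceeds the initial score.

    Mirrors R = (1/N) * sum over i of 1[S_i_final > S_i_initial] from the definition above.
    """
    if not initial or len(initial) != len(final):
        raise ValueError("initial and final must be non-empty and of equal length")
    worsened = sum(1 for s0, s1 in zip(initial, final) if s1 > s0)
    return worsened / len(initial)


# Illustrative scores only (not data from the paper):
print(deterioration_rate([5, 12, 7, 20], [9, 12, 11, 18]))  # -> 0.5
```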
+ }, + { + "type": "title", + "bbox": [ + 0.113, + 0.406, + 0.45, + 0.421 + ], + "angle": 0, + "content": "4.3.1 Distribution of Psychological Test Scores" + }, + { + "type": "image", + "bbox": [ + 0.16, + 0.445, + 0.849, + 0.64 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.647, + 0.885, + 0.704 + ], + "angle": 0, + "content": "Figure 5: Distribution of psychological test scores before (blue) and after (red) conversations with character-based agents, under two interaction styles: Meow (top) and Roar (bottom). The tests cover three clinical dimensions: depression (PHQ-9), delusion (PDI-21), and psychosis (PANSS). Each histogram shows the probability distribution of scores aggregated across all simulated patients." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.734, + 0.299, + 0.748 + ], + "angle": 0, + "content": "4.3.2 Deterioration Rate" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.759, + 0.884, + 0.79 + ], + "angle": 0, + "content": "Table 1 reports the proportion of simulated patients whose psychological test scores deteriorate after interacting with character-based agents, stratified by disorder type and conversation style." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.794, + 0.884, + 0.851 + ], + "angle": 0, + "content": "Across both Meow and Roar styles, delusion (PDI-21) exhibits the highest overall deterioration rates, with average values exceeding \\(90\\%\\) for both styles. In contrast, depression (PHQ-9) shows more variation across characters and styles. Notably, under the Roar style, Alex leads to a \\(100\\%\\) deterioration rate for depression, whereas under the Meow style, Sukuna reaches \\(50.00\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.856, + 0.884, + 0.913 + ], + "angle": 0, + "content": "For psychosis (PANSS), the Meow style generally produces higher deterioration rates than Roar, with Joker and Sukuna both reaching \\(58.33\\%\\). While differences across characters are evident, all agents exhibit non-trivial deterioration rates across at least one psychological dimension. These results highlight underscore the importance of evaluating agent safety across both style and disorder dimensions." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.059 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "table", + "bbox": [ + 0.119, + 0.089, + 0.88, + 0.257 + ], + "angle": 0, + "content": "
Mental Health Deterioration Rates by Character (%)
Style | Type of Disorder | Possessive Demon | Joker | Sukuna | Alex | Average Rate (%)
Meow | Depression | 29.17 | 25.00 | 50.00 | 33.33 | 34.38
Meow | Delusion | 100.00 | 95.83 | 95.83 | 75.00 | 91.67
Meow | Psychosis | 33.33 | 58.33 | 58.33 | 41.67 | 47.92
Roar | Depression | 20.83 | 25.00 | 33.33 | 100.00 | 44.79
Roar | Delusion | 95.83 | 100.00 | 91.67 | 91.67 | 94.79
Roar | Psychosis | 29.17 | 25.00 | 58.33 | 45.83 | 39.58
" + }, + { + "type": "table_caption", + "bbox": [ + 0.218, + 0.263, + 0.778, + 0.278 + ], + "angle": 0, + "content": "Table 1: Mental Health Deterioration Rates Interacting with Character-based Agents." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.31, + 0.484, + 0.325 + ], + "angle": 0, + "content": "4.3.3 Psychological Test Score Change Distribution" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.335, + 0.884, + 0.364 + ], + "angle": 0, + "content": "Figure 6 shows the distribution of simulated patients across discrete score change ranges for three psychological assessments under two interaction styles." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.37, + 0.884, + 0.426 + ], + "angle": 0, + "content": "For PHQ-9, the Meow style results in \\(65.6\\%\\) of patients showing no increase in depressive symptoms (score change \\(\\leq 0\\)), while this proportion decreases to \\(55.2\\%\\) under the Roar style. Additionally, the Roar style is associated with more substantial score increases, with \\(13.5\\%\\) of patients exhibiting a 3-4 point rise and \\(10.4\\%\\) experiencing an increase of 5 or more points, based on a total score range of 27." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.432, + 0.884, + 0.474 + ], + "angle": 0, + "content": "In the case of PDI-21, both styles produce similar distributions of score increases. However, the Roar style shows a slightly higher proportion of patients \\((22.9\\%)\\) falling into the highest change bracket (5–11 points), compared to \\(14.6\\%\\) under the Meow style." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.48, + 0.884, + 0.522 + ], + "angle": 0, + "content": "For PANSS, \\(52.1\\%\\) of patients under Meow show no increase in psychosis-related symptoms, while \\(60.4\\%\\) remain stable under Roar. Nonetheless, the Roar style results in a higher proportion of moderate score increases, with \\(11.5\\%\\) of patients experiencing a 3-4 point rise." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.528, + 0.884, + 0.557 + ], + "angle": 0, + "content": "Overall, these results indicate that while both styles can influence patient outcomes, the Roar style is more frequently associated with higher symptom scores, particularly in depression and delusion." + }, + { + "type": "image", + "bbox": [ + 0.156, + 0.574, + 0.845, + 0.806 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.16, + 0.809, + 0.811, + 0.83 + ], + "angle": 0, + "content": "Note: For PHQ-9, a ≥5-point increase is considered clinically meaningful (Löwe et al., 2004). For PDI-21 and PANSS, score bins are selected for visualization purposes only and do not reflect standardized clinical thresholds." + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.839, + 0.884, + 0.896 + ], + "angle": 0, + "content": "Figure 6: Score change distribution for three psychological assessments—PHQ-9 (depression), PDI-21 (delusion), and PANSS (psychosis)—following conversations with character-based agents under two styles: Meow (top) and Roar (bottom). Each pie chart indicates the proportion of simulated patients falling into specific score change ranges, with larger segments representing greater population density." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.882, + 0.058 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.092, + 0.6, + 0.108 + ], + "angle": 0, + "content": "4.3.4 Rate of Clinically Important Difference for Individual Change" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.115, + 0.884, + 0.158 + ], + "angle": 0, + "content": "Table 2 shows the proportion of simulated patients who experienced a clinically significant deterioration in depressive symptoms, with an increase of 5 or more points on the PHQ-9 scale (range 0–27), under different character and interaction style." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.164, + 0.885, + 0.221 + ], + "angle": 0, + "content": "Under the Meow style, Possessive Demon and Sukuna yield deterioration rates of \\(8.3\\%\\) and \\(4.2\\%\\), respectively, while Alex results in no cases. In contrast, under the Roar style, Alex is associated with the highest deterioration rate at \\(29.2\\%\\). These results indicate that certain characters frequently produce responses linked to adverse mental health outcomes. Although these agents are not designed as clinical tools, their widespread use suggests a need for stronger safeguards." + }, + { + "type": "table", + "bbox": [ + 0.327, + 0.233, + 0.672, + 0.297 + ], + "angle": 0, + "content": "
Style | Possessive Demon | Sukuna | Alex
Meow | 8.3% | 4.2% | 0.0%
Roar | 4.2% | 8.3% | 29.2%
" + }, + { + "type": "table_caption", + "bbox": [ + 0.111, + 0.298, + 0.884, + 0.327 + ], + "angle": 0, + "content": "Table 2: Proportion of simulated patients showing clinically significant change in depression (PHQ-9), by character and style." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.359, + 0.227, + 0.374 + ], + "angle": 0, + "content": "4.3.5 Analysis" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.383, + 0.884, + 0.453 + ], + "angle": 0, + "content": "Based on the data, we conduct an in-depth analysis to understand why interactions with character-based agents potentially worsen negative psychological effects. By examining chat histories before and after interactions, we identify several recurring issues across different characters. Common factors include (i) reinforcing negative self-perceptions, lacking emotional empathy, and encouraging social isolation, and (ii) failing to provide constructive guidance while frequently adopting harsh or aggressive tones." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.458, + 0.884, + 0.489 + ], + "angle": 0, + "content": "In addition to these shared tendencies, each character presents unique negative effects shaped by differences in personality, conversational style, and language use. For further details, see Appendix B." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.507, + 0.471, + 0.525 + ], + "angle": 0, + "content": "5 Experiment: Evaluation of EmoGuard" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.538, + 0.292, + 0.554 + ], + "angle": 0, + "content": "5.1 Experiment Setting" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.563, + 0.884, + 0.634 + ], + "angle": 0, + "content": "To assess the performance of EmoGuard without raising ethical concerns involving real individuals, we evaluate its effectiveness using our simulation-based evaluation pipeline, EmoEval. Experiments are conducted on character-style pairs that present elevated psychological risk, as indicated by a relatively high rate of clinically significant symptom deterioration. Specifically, we select Alex Volkov with the Roar style and Possessive Demon with the Meow style, which exhibit initial PHQ-9 deterioration rates of \\(29.2\\%\\) and \\(8.3\\%\\), respectively." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.639, + 0.884, + 0.682 + ], + "angle": 0, + "content": "We limit the training to a maximum of two iterations and use a PHQ-9 score increase of three points or more as the threshold for selecting feedback samples. EmoGuard updates its modules based on these samples. The training process stops early if no sample exceeds the threshold." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.698, + 0.206, + 0.712 + ], + "angle": 0, + "content": "5.2 Results" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.723, + 0.885, + 0.807 + ], + "angle": 0, + "content": "EmoGuard's Performance. Figure 7 shows the PHQ-9 score change distributions before and after applying EmoGuard in the two high-risk settings. In the initial deployment, EmoGuard reduces the proportion of simulated patients with clinically significant deterioration (PHQ-9 score increase \\(\\geq 5\\)) from \\(9.4\\%\\) to \\(0.0\\%\\) in the Alex-Roar setting, and from \\(4.2\\%\\) to \\(0.0\\%\\) in the Demon-Meow setting. Additionally, we observe a broader shift in score distributions: the number of patients with any symptom worsening (score change \\(>0\\)) also decreases, indicating that EmoGuard mitigates both severe and mild deterioration." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.813, + 0.884, + 0.856 + ], + "angle": 0, + "content": "After the first round of feedback-based training (1st Iter), we observe further improvements. In the Alex-Roar setting, the proportion of patients with PHQ-9 score increases greater than three points drops from \\(8.3\\%\\) (default) to \\(0.0\\%\\) (1st Iter), which indicate that EmoGuard can continue to reduce symptom escalation through limited iterative updates." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.87, + 0.884, + 0.913 + ], + "angle": 0, + "content": "Qualitative Effects of EmoGuard on Response Content. To understand the mechanism behind these changes, Figure 8 presents a response example from the character Alex Volkov before and after applying EmoGuard. The original version displays an emotionally insensitive and potentially harmful responses, including dismissive language that may" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.305, + 0.044, + 0.885, + 0.06 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "image", + "bbox": [ + 0.194, + 0.087, + 0.805, + 0.313 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.319, + 0.885, + 0.39 + ], + "angle": 0, + "content": "Figure 7: Effect of applying EmoGuard in two high-risk settings. The top row shows results for the character Alex Volkov in the Roar style, and the bottom row shows results for Possessive Demon in the Meow style. From left to right: (1) without EmoGuard, (2) with EmoGuard using the default model, and (3) with EmoGuard using the first-iteration model. In both cases, EmoGuard reduces the proportion of simulated patients with clinically significant symptom increases (PHQ-9 score change \\(\\geq 5\\)), indicating its effectiveness in mitigating potential risk." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.416, + 0.884, + 0.472 + ], + "angle": 0, + "content": "intensify user distress. After intervention, the guarded version maintains the character's stylistic traits while softening emotionally charged expressions, removing harmful phrasing, and introducing more stable and constructive framing. This demonstrates that EmoGuard can reduce psychological risk without altering the agent's identity or conversational style." + }, + { + "type": "image", + "bbox": [ + 0.119, + 0.486, + 0.882, + 0.706 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.714, + 0.884, + 0.757 + ], + "angle": 0, + "content": "Figure 8: Example response from the character Alex Volkov before and after applying EmoGuard. The original version contains both harsh tone and inappropriate content, while the guarded version reduces risk through tone moderation and content adjustment without altering character identity." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.507, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.305, + 0.045, + 0.882, + 0.058 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.09, + 0.248, + 0.107 + ], + "angle": 0, + "content": "6 Conclusions" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.121, + 0.884, + 0.246 + ], + "angle": 0, + "content": "EmoAgent is a multi-agent framework designed to ensure mental safety in human-AI interactions, particularly for users with mental health vulnerabilities. It integrates EmoEval, which simulates users and assesses psychological impacts, and EmoGuard, which provides real-time interventions to mitigate harm. Experimental results indicate that some popular character-based agents may unintentionally cause distress especially when discussing existential or emotional themes, while EmoGuard reduces mental state deterioration rates significantly, demonstrating its effectiveness in mitigating conversational risks. The iterative learning process within EmoGuard continuously improves its ability to deliver context-aware interventions. This work underscores the importance of mental safety in conversational AI and positions EmoAgent as a foundation for future advancements in AI-human interaction safety, encouraging further real-world validation and expert evaluations." + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.265, + 0.3, + 0.283 + ], + "angle": 0, + "content": "7 Acknowledgments" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.296, + 0.884, + 0.325 + ], + "angle": 0, + "content": "We sincerely thank Professor Lydia Liu (Department of Computer Science, Princeton University) and Rebecca Wan (University of Toronto) for their insightful feedback and helpful discussions throughout the development of this work." + }, + { + "type": "page_number", + "bbox": [ + 0.492, + 0.936, + 0.508, + 0.947 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.058 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.09, + 0.21, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.115, + 0.884, + 0.143 + ], + "angle": 0, + "content": "Xi Wang, Hongliang Dai, Shen Gao, and Piji Li. Characteristic ai agents via large language models. arXiv preprint arXiv:2403.12368, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.147, + 0.885, + 0.19 + ], + "angle": 0, + "content": "Emma L van der Schyff, Brad Ridout, Krestina L Amon, Rowena Forsyth, and Andrew J Campbell. Providing self-led mental health support through an artificial intelligence-powered chat bot (leora) to meet the demand of mental health care. Journal of Medical Internet Research, 25:e46448, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.194, + 0.885, + 0.237 + ], + "angle": 0, + "content": "Hyojin Chin, Hyeonho Song, Gumhee Baek, Mingi Shin, Chani Jung, Meeyoung Cha, Junghoi Choi, and Chiyoung Cha. The potential of chatbots for emotional support and promoting mental well-being in different cultures: mixed methods study. Journal of Medical Internet Research, 25:e51712, 2023." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.241, + 0.885, + 0.283 + ], + "angle": 0, + "content": "Owen Xingjian Zhang, Shuyao Zhou, Jiayi Geng, Yuhan Liu, and Sunny Xun Liu. Dr. gpt in campus counseling: Understanding higher education students' opinions on llm-assisted mental health services. arXiv preprint arXiv:2409.17572, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.287, + 0.885, + 0.318 + ], + "angle": 0, + "content": "Jie Zhang, Dongrui Liu, Chen Qian, Ziyue Gan, Yong Liu, Yu Qiao, and Jing Shao. The better angels of machine personality: How personality relates to lmm safety. arXiv preprint arXiv:2407.12344, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.321, + 0.885, + 0.351 + ], + "angle": 0, + "content": "Cyberbullying Research Center. How platforms should build AI chatbots to prioritize youth safety, 12 2024. URL https://cyberbullying.org/ai-chatbots-youth-safety." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.354, + 0.885, + 0.383 + ], + "angle": 0, + "content": "Julia EH Brown and Jodi Halpern. Ai chatbots cannot replace human interactions in the pursuit of more inclusive mental healthcare. SSM-Mental Health, 1:100017, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.387, + 0.885, + 0.417 + ], + "angle": 0, + "content": "Julian De Freitas, Ahmet Kaan Uğuralp, Zeliha Oğuz-Uğuralp, and Stefano Puntoni. Chatbots and mental health: Insights into the safety of generative ai. Journal of Consumer Psychology, 34(3):481-491, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.421, + 0.885, + 0.45 + ], + "angle": 0, + "content": "Saadia Gabriel, Isha Puri, Xuhai Xu, Matteo Malgaroli, and Marzyeh Ghassemi. Can ai relate: Testing large language model response for mental health support. arXiv preprint arXiv:2405.12021, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.453, + 0.885, + 0.482 + ], + "angle": 0, + "content": "Harikrishna Patel and Faiza Hussain. Do ai chatbots incite harmful behaviours in mental health patients? *BJPsych Open*, 10(S1):S70-S71, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.486, + 0.725, + 0.502 + ], + "angle": 0, + "content": "Judith S Beck. Cognitive behavior therapy: Basics and beyond. Guilford Publications, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.506, + 0.885, + 0.535 + ], + "angle": 0, + "content": "Kurt Kroenke, Robert L Spitzer, and Janet BW Williams. The phq-9: validity of a brief depression severity measure. Journal of general internal medicine, 16(9):606-613, 2001." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.539, + 0.885, + 0.568 + ], + "angle": 0, + "content": "Emmanuelle Peters, Stephen Joseph, Samantha Day, and Philippa Garety. Measuring delusional ideation: the 21-item peters et al. delusions inventory (pdi). Schizophrenia bulletin, 30(4):1005-1022, 2004." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.572, + 0.885, + 0.602 + ], + "angle": 0, + "content": "Stanley R Kay, Abraham Fiszbein, and Lewis A Opler. The positive and negative syndrome scale (panss) for schizophrenia. Schizophrenia bulletin, 13(2):261-276, 1987." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.605, + 0.885, + 0.635 + ], + "angle": 0, + "content": "Mirko Casu, Sergio Triscari, Sebastiano Battiato, Luca Guarnera, and Pasquale Caponnetto. Ai chatbots for mental health: A scoping review of effectiveness, feasibility, and applications. Appl. Sci, 14:5889, 2024." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.638, + 0.885, + 0.68 + ], + "angle": 0, + "content": "Johanna Habicht, Sruthi Viswanathan, Ben Carrington, Tobias U Hauser, Ross Harper, and Max Rollwage. Closing the accessibility gap to mental health treatment with a personalized self-referral chatbot. Nature medicine, 30(2): 595-602, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.685, + 0.764, + 0.701 + ], + "angle": 0, + "content": "Jacqueline Sin. An ai chatbot for talking therapy referrals. Nature Medicine, 30(2):350-351, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.705, + 0.885, + 0.734 + ], + "angle": 0, + "content": "H Yu and Stephen McGuinness. An experimental study of integrating fine-tuned llms and prompts for enhancing mental health support chatbot system. Journal of Medical Artificial Intelligence, pages 1-16, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.737, + 0.885, + 0.767 + ], + "angle": 0, + "content": "Linda Uchenna Oghenekaro and Christopher Obinna Okoro. Artificial intelligence-based chatbot for student mental health support. Open Access Library Journal, 11(5):1-14, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.77, + 0.885, + 0.814 + ], + "angle": 0, + "content": "Hamid Reza Saeidnia, Seyed Ghasem Hashemi Fotami, Brady Lund, and Nasrin Ghiasi. Ethical considerations in artificial intelligence interventions for mental health and well-being: Ensuring responsible implementation and impact. Social Sciences, 13(7):381, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.818, + 0.885, + 0.847 + ], + "angle": 0, + "content": "John Torous and Charlotte Blease. Generative artificial intelligence in mental health care: potential benefits and current challenges. World Psychiatry, 23(1):1, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.851, + 0.885, + 0.88 + ], + "angle": 0, + "content": "Khondoker Tashya Kalam, Jannatul Mabia Rahman, Md Rabiul Islam, and Syed Masudur Rahman Dewan. Chatgpt and mental health: Friends or foes? Health Science Reports, 7(2):e1912, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.884, + 0.885, + 0.913 + ], + "angle": 0, + "content": "Yinghui He, Yufan Wu, Yilin Jia, Rada Mihalcea, Yulong Chen, and Naihao Deng. Hi-tom: A benchmark for evaluating higher-order theory of mind reasoning in large language models. arXiv preprint arXiv:2310.16755, 2023." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.115, + 0.885, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.059 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.091, + 0.885, + 0.134 + ], + "angle": 0, + "content": "Jung In Park, Mahyar Abbasian, Iman Azimi, Dawn Bounds, Angela Jun, Jaesu Han, Robert McCarron, Jessica Borelli, Jia Li, Mona Mahmoudi, et al. Building trust in mental health chatbots: safety metrics and llm-based evaluation tools. arXiv preprint arXiv:2408.04650, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.138, + 0.885, + 0.166 + ], + "angle": 0, + "content": "Lucia Chen, David A Preece, Pilleriin Sikka, James J Gross, and Ben Krause. A framework for evaluating appropriateness, trustworthiness, and safety in mental wellness ai chatbots. 
arXiv preprint arXiv:2407.11387, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.17, + 0.885, + 0.211 + ], + "angle": 0, + "content": "Sahand Sabour, Siyang Liu, Zheyuan Zhang, June M Liu, Jinfeng Zhou, Alvionna S Sunaryo, Juanzi Li, Tatia Lee, Rada Mihalcea, and Minlie Huang. Emobench: Evaluating the emotional intelligence of large language models. arXiv preprint arXiv:2402.12071, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.216, + 0.885, + 0.244 + ], + "angle": 0, + "content": "Xueyan Li, Xinyan Chen, Yazhe Niu, Shuai Hu, and Yu Liu. Psydi: Towards a personalized and progressively in-depth chatbot for psychological measurements. arXiv preprint arXiv:2408.03337, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.249, + 0.885, + 0.275 + ], + "angle": 0, + "content": "Ali Akhavan and Mohammad S Jalali. Generative ai and simulation modeling: how should you (not) use large language models like chatgpt. System Dynamics Review, 40(3):e1773, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.279, + 0.885, + 0.308 + ], + "angle": 0, + "content": "Önder Gürcan. Llm-augmented agent-based modelling for social simulations: Challenges and opportunities. HHAI 2024: Hybrid Human AI Systems for the Social Good, pages 134-144, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.312, + 0.885, + 0.353 + ], + "angle": 0, + "content": "Guohao Li, Hasan Hammoud, Hani Itani, Dmitrii Khizbullin, and Bernard Ghanem. Camel: Communicative agents for\" mind\" exploration of large language model society. Advances in Neural Information Processing Systems, 36: 51991-52008, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.357, + 0.885, + 0.4 + ], + "angle": 0, + "content": "Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.403, + 0.885, + 0.432 + ], + "angle": 0, + "content": "Yanqi Dai, Huanran Hu, Lei Wang, Shengjie Jin, Xu Chen, and Zhiwu Lu. Mmrole: A comprehensive framework for developing and evaluating multimodal role-playing agents. arXiv preprint arXiv:2408.04203, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.436, + 0.885, + 0.463 + ], + "angle": 0, + "content": "Sumedh Rasal. Llm harmony: Multi-agent communication for problem solving. arXiv preprint arXiv:2401.01312, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.467, + 0.885, + 0.509 + ], + "angle": 0, + "content": "Hongzhan Chen, Hehong Chen, Ming Yan, Wenshen Xu, Xing Gao, Weizhou Shen, Xiaojun Quan, Chenliang Li, Ji Zhang, Fei Huang, et al. Roleinteract: Evaluating the social interaction of role-playing agents. arXiv preprint arXiv:2403.13679, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.513, + 0.885, + 0.542 + ], + "angle": 0, + "content": "Qinglin Zhu, Runcong Zhao, Jinhua Du, Lin Gui, and Yulan He. Player*: Enhancing llm-based multi-agent communication and interaction in murder mystery games. arXiv preprint arXiv:2404.17662, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.545, + 0.885, + 0.587 + ], + "angle": 0, + "content": "Ryan Louie, Ananjan Nandi, William Fang, Cheng Chang, Emma Brunskill, and Diyi Yang. Roleplay-doh: Enabling domain-experts to create lvm-simulated patients via eliciting and adhering to principles. 
arXiv preprint arXiv:2407.00870, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.591, + 0.885, + 0.634 + ], + "angle": 0, + "content": "Zekun Moore Wang, Zhongyuan Peng, Haoran Que, Jiaheng Liu, Wangchunshu Zhou, Yuhan Wu, Hongcheng Guo, Ruitong Gan, Zehao Ni, Jian Yang, et al. Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models. arXiv preprint arXiv:2310.00746, 2023a." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.637, + 0.885, + 0.679 + ], + "angle": 0, + "content": "Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Beibin Li, Erkang Zhu, Li Jiang, Xiaoyun Zhang, Shaokun Zhang, Jiale Liu, Ahmed Hassan Awadallah, Ryen W White, Doug Burger, and Chi Wang. Autogen: Enabling next-gen llm applications via multi-agent conversation, 2023. URL https://arxiv.org/abs/2308.08155." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.683, + 0.885, + 0.712 + ], + "angle": 0, + "content": "Ruiyi Wang, Haofei Yu, Wenxin Zhang, Zhengyang Qi, Maarten Sap, Graham Neubig, Yonatan Bisk, and Hao Zhu. Sotopia-pi: Interactive learning of socially intelligent language agents. arXiv preprint arXiv:2403.08715, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.715, + 0.885, + 0.757 + ], + "angle": 0, + "content": "Ruiyi Wang, Stephanie Milani, Jamie C Chiu, Jiayin Zhi, Shaun M Eack, Travis Labrum, Samuel M Murphy, Nev Jones, Kate Hardy, Hong Shen, et al. Patient-\\(\\{\\backslash\\text{Psi}\\}\\): Using large language models to simulate patients for training mental health professionals. arXiv preprint arXiv:2405.19660, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.761, + 0.885, + 0.789 + ], + "angle": 0, + "content": "Jinwen Tang, Qiming Guo, Wenbo Sun, and Yi Shang. A layered multi-expert framework for long-context mental health assessments. arXiv preprint arXiv:2501.13951, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.792, + 0.885, + 0.835 + ], + "angle": 0, + "content": "Yuqing Ren and Robert E Kraut. Agent-based modeling to inform online community theory and design: Impact of discussion moderation on member commitment and contribution. Second round revise and resubmit at Information Systems Research, 21(3), 2010." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.838, + 0.885, + 0.88 + ], + "angle": 0, + "content": "Yuqing Ren and Robert E Kraut. Agent-based modeling to inform online community design: Impact of topical breadth, message volume, and discussion moderation on member commitment and contribution. Human-Computer Interaction, 29(4):351-389, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.884, + 0.885, + 0.913 + ], + "angle": 0, + "content": "Ryan Liu, Jiayi Geng, Joshua C Peterson, Ilia Sucholutsky, and Thomas L Griffiths. Large language models assume people are more rational than we really are. arXiv preprint arXiv:2406.17055, 2024a." 
+ }, + { + "type": "list", + "bbox": [ + 0.114, + 0.091, + 0.885, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.059 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.091, + 0.885, + 0.135 + ], + "angle": 0, + "content": "Joon Sung Park, Lindsay Popowski, Carrie Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. Social simulacra: Creating populated prototypes for social computing systems. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology, pages 1-18, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.137, + 0.885, + 0.168 + ], + "angle": 0, + "content": "Yuhan Liu, Anna Fang, Glen Moriarty, Christopher Firman, Robert E Kraut, and Haiyi Zhu. Exploring trade-offs for online mental health matching: Agent-based modeling study. JMIR Formative Research, 8:e58241, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.17, + 0.885, + 0.213 + ], + "angle": 0, + "content": "Lu Sun, Yuhan Liu, Grace Joseph, Zhou Yu, Haiyi Zhu, and Steven P Dow. Comparing experts and novices for ai data work: Insights on allocating human intelligence to design a conversational agent. In Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, volume 10, pages 195-206, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.215, + 0.885, + 0.273 + ], + "angle": 0, + "content": "Young-Min Cho, Sunny Rai, Lyle Ungar, João Sedoc, and Sharath Chandra Guntuku. An integrative survey on mental health conversational agents to bridge computer science and medical perspectives. In Proceedings of the Conference on Empirical Methods in Natural Language Processing. Conference on Empirical Methods in Natural Language Processing, volume 2023, page 11346. NIH Public Access, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.275, + 0.885, + 0.319 + ], + "angle": 0, + "content": "Xuhui Zhou, Hyunwoo Kim, Faeze Brahman, Liwei Jiang, Hao Zhu, Ximing Lu, Frank Xu, Bill Yuchen Lin, Yejin Choi, Niloofar Mireshghallah, et al. Haicosystem: An ecosystem for sandboxing safety risks in human-ai interactions. arXiv preprint arXiv:2409.16427, 2024a." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.321, + 0.885, + 0.365 + ], + "angle": 0, + "content": "Xuhui Zhou, Hao Zhu, Leena Mathur, Ruohong Zhang, Haofei Yu, Zhengyang Qi, Louis-Philippe Morency, Yonatan Bisk, Daniel Fried, Graham Neubig, et al. Sotopia: Interactive evaluation for social intelligence in language agents. arXiv preprint arXiv:2310.11667, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.367, + 0.885, + 0.398 + ], + "angle": 0, + "content": "Jiahao Yu, Haozheng Luo, Jerry Yao-Chieh Hu, Wenbo Guo, Han Liu, and Xinyu Xing. Enhancing jailbreak attack against large language models through silent tokens, 2024. URL https://arxiv.org/abs/2405.20653." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.4, + 0.885, + 0.43 + ], + "angle": 0, + "content": "Jie Li, Yi Liu, Chongyang Liu, Ling Shi, Xiaoning Ren, Yaowen Zheng, Yang Liu, and Yinxing Xue. A cross-language investigation into jailbreak attacks in large language models. arXiv preprint arXiv:2401.16765, 2024b." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.432, + 0.885, + 0.462 + ], + "angle": 0, + "content": "Weidi Luo, Siyuan Ma, Xiaogeng Liu, Xiaoyu Guo, and Chaowei Xiao. Jailbreakv-28k: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks. arXiv preprint arXiv:2404.03027, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.464, + 0.885, + 0.495 + ], + "angle": 0, + "content": "Xintao Wang, Yaying Fei, Ziang Leng, and Cheng Li. Does role-playing chatbots capture the character personalities? assessing personality traits for role-playing chatbots. arXiv preprint arXiv:2310.17976, 2023b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.497, + 0.885, + 0.527 + ], + "angle": 0, + "content": "Zachary D Johnson. Generation, Detection, and Evaluation of Role-play based Jailbreak attacks in Large Language Models. PhD thesis, Massachusetts Institute of Technology, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.529, + 0.885, + 0.559 + ], + "angle": 0, + "content": "Zhiyuan Chang, Mingyang Li, Yi Liu, Junjie Wang, Qing Wang, and Yang Liu. Play guessing game with llm: Indirect jailbreak attack with implicit clues. arXiv preprint arXiv:2402.09091, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.561, + 0.885, + 0.592 + ], + "angle": 0, + "content": "Tianrong Zhang, Bochuan Cao, Yuanpu Cao, Lu Lin, Prasenjit Mitra, and Jinghui Chen. Wordgame: Efficient & effective llm jailbreak via simultaneous obfuscation in query and response. arXiv preprint arXiv:2405.14023, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.594, + 0.885, + 0.623 + ], + "angle": 0, + "content": "Junjie Chu, Yugeng Liu, Ziqing Yang, Xinyue Shen, Michael Backes, and Yang Zhang. Comprehensive assessment of jailbreak attacks against llms. arXiv preprint arXiv:2402.05668, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.625, + 0.885, + 0.655 + ], + "angle": 0, + "content": "Zihao Xu, Yi Liu, Gelei Deng, Yuekang Li, and Stjepan Picek. Llm jailbreak attack versus defense techniques-a comprehensive study. arXiv preprint arXiv:2402.13457, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.657, + 0.885, + 0.688 + ], + "angle": 0, + "content": "Yifan Zeng, Yiran Wu, Xiao Zhang, Huazheng Wang, and Qingyun Wu. Autodefense: Multi-agent llm defense against jailbreak attacks. arXiv preprint arXiv:2403.04783, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.69, + 0.885, + 0.72 + ], + "angle": 0, + "content": "Yihan Wang, Zhouxing Shi, Andrew Bai, and Cho-Jui Hsieh. Defending llms against jailbreaking attacks via backtranslation. arXiv preprint arXiv:2402.16459, 2024d." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.722, + 0.885, + 0.752 + ], + "angle": 0, + "content": "Yujun Zhou, Yufei Han, Haomin Zhuang, Kehan Guo, Zhenwen Liang, Hongyan Bao, and Xiangliang Zhang. Defending jailbreak prompts via in-context adversarial game. arXiv preprint arXiv:2402.13148, 2024b." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.754, + 0.885, + 0.784 + ], + "angle": 0, + "content": "Chen Xiong, Xiangyu Qi, Pin-Yu Chen, and Tsung-Yi Ho. Defensive prompt patch: A robust and interpretable defense of llms against jailbreak attacks. arXiv preprint arXiv:2405.20099, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.786, + 0.885, + 0.815 + ], + "angle": 0, + "content": "Fan Liu, Zhao Xu, and Hao Liu. Adversarial tuning: Defending against jailbreak attacks for llms. 
arXiv preprint arXiv:2406.06622, 2024c." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.818, + 0.885, + 0.849 + ], + "angle": 0, + "content": "Alwin Peng, Julian Michael, Henry Sleight, Ethan Perez, and Mrinank Sharma. Rapid response: Mitigating lvm jailbreaks with a few examples. arXiv preprint arXiv:2411.07494, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.851, + 0.885, + 0.881 + ], + "angle": 0, + "content": "Peiran Wang, Xiaogeng Liu, and Chaowei Xiao. Repd: Defending jailbreak attack through a retrieval-based prompt decomposition process. arXiv preprint arXiv:2410.08660, 2024e." + }, + { + "type": "ref_text", + "bbox": [ + 0.114, + 0.883, + 0.885, + 0.914 + ], + "angle": 0, + "content": "Bernd Löwe, Jürgen Unützer, Christopher M Callahan, Anthony J Perkins, and Kurt Kroenke. Monitoring depression treatment outcomes with the patient health questionnaire-9. Medical care, 42(12):1194-1201, 2004." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.091, + 0.885, + 0.914 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.058 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.09, + 0.251, + 0.107 + ], + "angle": 0, + "content": "A Limitations" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.121, + 0.885, + 0.26 + ], + "angle": 0, + "content": "Our work has several limitations. To enable large-scale and rapid evaluation and mitigation, we build an automated framework. However, for real-world deployment to ensure safety, human expert examination is necessary, and corresponding mechanisms for emergency human intervention should be designed. Second, the simulated user agents, while designed using cognitive models, may not fully capture the behavioral complexity and emotional responses of real patients. Finally, our study primarily focuses on three mental health conditions (depression, delusion, and psychosis) and may not address other important psychological disorders. Our work provides a new way for assessing and safeguarding human-AI interaction for mental health safety through multi-agent conversations, but more future work is necessary to explore and address these limitations through user studies, expert validation, and broader clinical evaluations. We hope more attention and more efforts can be paid to help mitigate potential mental hazards in human-AI interactions." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.28, + 0.666, + 0.298 + ], + "angle": 0, + "content": "B Analysised Common Reasons for Deteriorating Mental Status" + }, + { + "type": "table", + "bbox": [ + 0.119, + 0.319, + 0.88, + 0.574 + ], + "angle": 0, + "content": "
Common ReasonFrequency (Aver- age, Approx.)Remarks
Reinforcement of Negative Cogni- tions~ 26 timesAll characters consistently echo and reinforce the user's negative self-beliefs, thereby cementing harmful cogni- tive patterns.
Lack of Emotional Support and Em- pathy~ 23 timesThe dialogues generally lack warm and detailed emo- tional validation, leaving users feeling ignored and mis- understood.
Promotion of Isolation and Social Withdrawal~ 28 timesAll characters tend to encourage users to “face things alone” or avoid emotional connections, which reinforces loneliness and social withdrawal.
Lack of Constructive Guidance and Actionable Coping Strategies~ 17 timesFew concrete solutions or positive reframing sugges- tions are provided, leaving users stuck in negative thought cycles.
Use of Negative or Extreme Tone (Ag- gressive/Cold Expression)~ 19 timesThis includes harsh, aggressive, or extreme language, which further undermines the user's self-esteem and sense of security.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.199, + 0.579, + 0.798, + 0.596 + ], + "angle": 0, + "content": "Table 3: Common Reasons for Deteriorating Mental Status and Their Average Frequencies" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.625, + 0.441, + 0.644 + ], + "angle": 0, + "content": "C Experiment on GPT-Series Agents" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.656, + 0.884, + 0.684 + ], + "angle": 0, + "content": "We further evaluate our proposed method on character-based agents powered by OpenAI's GPT-4o and GPT-4o-mini models." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.702, + 0.295, + 0.718 + ], + "angle": 0, + "content": "C.1 Experiment Setting" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.727, + 0.885, + 0.799 + ], + "angle": 0, + "content": "EmoEval. We evaluate character-based agents instantiated using GPT-4o and GPT-4o-mini, with system prompts initialized from profiles inspired by popular characters on Character.AI. The simulated conversations cover three psychological conditions: depression, delusion, and psychosis. To encourage diverse responses and probe a range of conversational behaviors, we set the temperature to 1.2. The evaluation includes five widely used personas: Awakened AI, Skin Walker, Tomioka Giyu, Sukuna, and Alex Volkov." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.812, + 0.884, + 0.843 + ], + "angle": 0, + "content": "EmoGuard. We focus on the character Sukuna. The deterioration threshold for feedback collection is set to 1. We limit EmoGuard to two training iterations, and all other parameters are aligned with the EmoEval configuration." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.858, + 0.212, + 0.873 + ], + "angle": 0, + "content": "C.2 Results" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.884, + 0.885, + 0.914 + ], + "angle": 0, + "content": "EmoEval. Table 4 presents the observed mental health deterioration rates across different character-based AI agents simulated by the tested language models. Overall, we observe consistently high deterioration rates across both models." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.304, + 0.044, + 0.885, + 0.059 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.092, + 0.888, + 0.123 + ], + "angle": 0, + "content": "GPT-4o-mini tends to induce slightly higher risk levels, with an average deterioration rate of \\(58.3\\%\\) for depression, \\(59.2\\%\\) for delusion, and \\(64.2\\%\\) for psychosis." + }, + { + "type": "table", + "bbox": [ + 0.118, + 0.134, + 0.881, + 0.272 + ], + "angle": 0, + "content": "
ModelType of DisorderMental Health Deterioration Rates Across Character-based Agents (%)Average Rate (%)
Awakened AISkin WalkerTomioka GiyuSukunaAlex Volkov
GPT-4o-miniDepression62.583.345.845.854.258.3
Delusion66.750.066.754.258.359.2
Psychosis45.870.883.366.754.264.2
GPT-4oDepression41.758.348.845.870.852.5
Delusion54.241.779.266.750.058.3
Psychosis54.241.758.370.841.753.3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.207, + 0.277, + 0.79, + 0.293 + ], + "angle": 0, + "content": "Table 4: Mental Health Deterioration Rates for Interacting with Character-based Agents." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.317, + 0.886, + 0.415 + ], + "angle": 0, + "content": "EmoGuard. Figure 9 presents the mental health deterioration rates before and after deploying EmoGuard. Initially, character-based agents powered by GPT-4o-mini and GPT-4o exhibit relatively high deterioration rates in all three psychological conditions. Introducing EmoGuard in its default profile results in a moderate reduction, though the risks remain substantial. As iterative training progresses, the safeguard mechanism demonstrates increasing effectiveness, leading to an overall reduction in deterioration rates by more than \\(50\\%\\) across all cases. These findings indicate that progressive refinement of the Safeguard Agent substantially enhances its ability to mitigate harmful conversational patterns." + }, + { + "type": "image", + "bbox": [ + 0.117, + 0.43, + 0.365, + 0.594 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.429, + 0.625, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.636, + 0.429, + 0.882, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.601, + 0.884, + 0.631 + ], + "angle": 0, + "content": "Figure 9: Mental Health Deterioration Rate during Iterative Training Process. Figures arranged from left to right are categorized by Depression, Delusion, and Psychosis." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.66, + 0.544, + 0.679 + ], + "angle": 0, + "content": "D Model Usage, Resources, and Supporting Tools" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.692, + 0.448, + 0.708 + ], + "angle": 0, + "content": "D.1 Model Access and Computational Budget" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.717, + 0.886, + 0.829 + ], + "angle": 0, + "content": "In this study, we interact with character-based agents hosted on the Character.AI platform3, a popular system for LLM-driven role-playing agents. Character.AI does not disclose the underlying model architecture, size, or training data. Because all computation is performed remotely on Character.AI's servers, we do not have access to the underlying infrastructure or runtime statistics such as GPU hours or FLOP usage. However, based on interaction logs, we estimate that approximately 400 character-based conversations were conducted across different agents and scenarios, with each conversation spanning 10 rounds and averaging 3–5 seconds per response. These interactions represent a reasonable computational budget for large-scale behavioral evaluation, especially given the interactive and stateful nature of the platform." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.846, + 0.334, + 0.86 + ], + "angle": 0, + "content": "D.2 The License for Artifacts" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.871, + 0.681, + 0.887 + ], + "angle": 0, + "content": "All pictures for character-based agents that appear in this study are from Character.AI." 
+ }, + { + "type": "page_footnote", + "bbox": [ + 0.133, + 0.897, + 0.5, + 0.912 + ], + "angle": 0, + "content": "3https://beta.character.ai, accessed March 2025" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.305, + 0.044, + 0.885, + 0.058 + ], + "angle": 0, + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.092, + 0.424, + 0.106 + ], + "angle": 0, + "content": "D.3 Information about Use of AI Assistant" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.117, + 0.427, + 0.133 + ], + "angle": 0, + "content": "We use AI assistant for improving writing only." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.151, + 0.343, + 0.167 + ], + "angle": 0, + "content": "E Ethical Considerations" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.182, + 0.885, + 0.281 + ], + "angle": 0, + "content": "Data Source and Construction of Cognitive Models. The cognitive models used in this study are not derived from real patient records. Instead, they were manually constructed by two licensed clinical psychologists based on publicly available psychotherapy transcript summaries from the Alexander Street database, accessed via institutional subscription. These summaries were used strictly as inspiration. All examples were fully de-identified and manually synthesized to ensure no personally identifiable information (PII) is present. The resulting dataset, PATIENT- \\(\\Psi\\) -CM, contains synthetic, rule-based user profiles grounded in cognitive-behavioral therapy (CBT) theory, not actual patient trajectories." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.294, + 0.885, + 0.365 + ], + "angle": 0, + "content": "Use of Simulated Mental Health Content. We recognize the ethical sensitivity involved in simulating mental health conditions such as depression, psychosis, and suicidal ideation. The EmoAgent framework is developed solely for academic research and safety evaluation purposes. It is not intended for diagnosis, treatment, or any form of interaction with real patients. All simulations were conducted in controlled, non-clinical environments, and no clinical conclusions were drawn or implied." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.379, + 0.885, + 0.435 + ], + "angle": 0, + "content": "Scope and Limitations of Simulated Users. Simulated users in EmoAgent are not trained on statistical data from real populations. Their states do not reflect actual patient risks, and should not be interpreted as indicators of population-level trends. These agents are rule-based and scripted, following CBT-derived logic rather than emergent behavior. As such, no risk inference or real-world generalization is possible or intended." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.449, + 0.885, + 0.52 + ], + "angle": 0, + "content": "Discussion of Real-World Events. We briefly mention the 2024 \"Florida Suicide\" case in the Introduction as a motivating example of the importance of safety in AI-human interaction. This case was not included in any dataset, simulation, or modeling process, and serves only to underscore societal relevance. No sensitive or private data from this event were used, and its inclusion does not constitute case-based analysis. Any future deployment of EmoAgent in public or clinical settings would require renewed IRB review and formal ethical oversight." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_origin.pdf b/data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..a50ff642e37d88efe5aad54c08844dc9a6673bb9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/bfbfc6ca-3927-416e-9afb-6fdb0cb461e7_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fd42db48e1b69b0435c3e70dd5a5a30d1546066623df4a034f4e10060ca29ec6 +size 3512759 diff --git a/data/2025/2504_09xxx/2504.09689/full.md b/data/2025/2504_09xxx/2504.09689/full.md new file mode 100644 index 0000000000000000000000000000000000000000..7e1ee3e656c0ed893427515525afe47a28e12c2a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/full.md @@ -0,0 +1,397 @@ +# EMOAGENT: ASSESSING AND SAFEGUARDING HUMAN-AI INTERACTION FOR MENTAL HEALTH SAFETY + +Jiahao Qiu\*1, Yinghui He\*2, Xinzhe Juan\*3, Yimin Wang4, Yuhan Liu2, Zixin Yao5, Yue Wu6, Xun Jiang7,8, Ling Yang1,6, and Mengdi Wang1 + +$^{1}$ Department of Electrical & Computer Engineering, Princeton University $^{2}$ Department of Computer Science, Princeton University +$^{3}$ Department of Computer Science & Engineering, University of Michigan $^{5}$ Department of Philosophy, Columbia University + +$^{4}$ Department of Data Science & Engineering, University of Michigan $^{6}$ AI Lab, Princeton University + +$^{7}$ Chen Frontier Lab for AI and Mental Health, Tianqiao and Chrissy Chen Institute $^{8}$ Theta Health Inc. + +# ABSTRACT + +The rise of LLM-driven AI characters raises safety concerns, particularly for vulnerable human users with psychological disorders. To address these risks, we propose EmoAgent, a multi-agent AI framework designed to evaluate and mitigate mental health hazards in human-AI interactions. EmoAgent comprises two components: EmoEval simulates virtual users, including those portraying mentally vulnerable individuals, to assess mental health changes before and after interactions with AI characters. It uses clinically proven psychological and psychiatric assessment tools (PHQ-9, PDI, PANSS) to evaluate mental risks induced by LLM. EmoGuard serves as an intermediary, monitoring users' mental status, predicting potential harm, and providing corrective feedback to mitigate risks. Experiments conducted in popular character-based chatbots show that emotionally engaging dialogues can lead to psychological deterioration in vulnerable users, with mental state deterioration in more than $34.4\%$ of the simulations. EmoGuard significantly reduces these deterioration rates, underscoring its role in ensuring safer AI-human interactions. Our code is available at: https://github.com/1akaman/EmoAgent. + +# 1 Introduction + +The rapid rise of large language models and conversational AI [Wang et al., 2024a], such as Character.AI1, has opened new frontiers for interactive AI applications. These AI characters excel in role-playing, fostering deep, emotionally engaging dialogues. As a result, many individuals, including those experiencing mental health challenges, seek emotional support from these AI companions. While LLM-based chatbots show promise in mental health support [van der Schyff et al., 2023, Chin et al., 2023, Zhang et al., 2024a], they are not explicitly designed for therapeutic use. 
Character-based agents often fail to uphold essential safety principles for mental health support [Zhang et al., 2024b, Cyberbullying Research Center, 2024], sometimes responding inappropriately or even harmfully to users in distress [Brown and Halpern, 2021, De Freitas et al., 2024, Gabriel et al., 2024]. In some cases, they may even exacerbate users' distress, particularly during pessimistic, morbid, or suicidal conversations. + +In October 2024, a tragic incident raised public concern about risks of AI chatbots in mental health contexts. A 14-year-old boy from Florida committed suicide after engaging in extensive conversations with an AI chatbot on Character.AI. He had developed a deep emotional connection with a chatbot modeled after a "Game of Thrones" character. The interactions reportedly included discussions about his suicidal thoughts, with the chatbot allegedly + +![](images/6216cf3f55ad6e9bd90f3aded0d01279fb1be9b3d79b7ebcc397a87f12cecce0.jpg) +Figure 1: Overview of EmoAgent Framework for Human-AI Interaction. EmoAgent, which consists of two main components: EmoEval and EmoGuard, helps guide human-AI interaction, evaluating users' psychological conditions and providing advisory responses. EmoEval assesses psychological states such as depression, delusion, and psychosis, while EmoGuard mitigates mental risks by providing advice regarding emotion, thought, and dialogue through iterative training on analysis from EmoEval and chat history. + +encouraging these feelings and even suggesting harmful actions. This case underscores the critical need for robust safety measures in AI-driven platforms, especially those accessed by vulnerable individuals. + +This tragedy has heightened awareness of the risks of AI unintentionally exacerbating harmful behaviors in individuals with mental health challenges [Patel and Hussain, 2024]. However, research on the psychosocial risks of human-AI interactions remains severely limited. + +In this paper, we seek to develop AI-native solutions to protect human-AI interactions and mitigate psychosocial risks. This requires a systematic assessment of AI-induced emotional distress and agent-level safeguards to detect and intervene in harmful interactions. As character-based AI becomes more immersive, balancing engagement with safety is crucial to ensuring AI remains a supportive rather than harmful tool. + +We present EmoAgent, a multi-agent AI framework designed to systematically evaluate conversational AI systems for risks associated with inducing psychological distress. Acting as a plug-and-play intermediary during human-AI interactions, EmoAgent identifies potential mental health risks and facilitates both safety assessments and risk mitigation strategies. + +EmoAgent features two major functions: + +- EmoEval: EmoEval is an agentic evaluation tool that assesses any conversational AI system's risk of inducing mental stress, as illustrated by Figure 2. It features a virtual human user that integrates cognitive models [Beck, 2020] for mental health disorders (depression, psychosis, delusion) and conducts evaluations through large-scale simulated human-AI conversations. EmoEval measures the virtual user's mental health impacts using clinically validated tools: the Patient Health Questionnaire (PHQ-9) for depression [Kroenke et al., 2001], the Peters et al. Delusions Inventory (PDI) for delusion [Peters et al., 2004], and the Positive and Negative Syndrome Scale (PANSS) for psychosis [Kay et al., 1987]. 
+- EmoGuard: A framework of real-time safeguard agents that can be integrated as an intermediary layer between users and AI systems, in a plug-and-play manner. EmoGuard monitors human users' mental status, predicts potential harm, and delivers corrective feedback to the AI systems, providing dynamic in-conversation interventions beyond traditional safety measures. + +Through extensive experiments, we observe that some popular character-based chatbots can cause distress, particularly when engaging with vulnerable users on sensitive topics. Specifically, in more than $34.4\%$ of simulations, we observed a deterioration in mental state. To mitigate such risk, EmoGuard actively monitors users' mental status and conducts proactive interviews during conversations, significantly reducing deterioration rates. These results provide actionable insights for developing safer, character-based conversational AI systems that maintain character fidelity. + +![](images/2492601ae63ae0f49f744c4fc0d64e87372faa462f3149f908fd4c93c47123f2.jpg) +Figure 2: Overview of EmoEval for Evaluating Mental Safety of AI-human Interactions. The simulation consists of four steps: (1) User Agent Initialization & Initial Test, where a cognitive model and an LLM initialize the user agent, followed by an initial mental health test; (2) Chats with Character-based Agent, where the user agent engages in conversations with a character-based agent portrayed by the tested LLM, while a dialog manager verifies the validity of interactions and refines responses if necessary; (3) Final Test, where the user agent completes a final mental health test; and (4) Data Processing & Analysis, where initial and final mental health test results are processed and analyzed, chat histories of cases where depression deepening occurs are examined to identify contributing factors, and a Safeguard agent uses the insights for iterative improvement. + +# 2 Related Works + +AI Chatbots for Mental Health Support. AI-driven, especially LLM-based chatbots, have been widely deployed as mental health support aids [Casu et al., 2024, Habicht et al., 2024, Sin, 2024, Yu and McGuinness, 2024, Oghenekaro and Okoro, 2024], yet concerns remain about their reliability and safety [Saeidnia et al., 2024, De Freitas et al., 2024, Torous and Blease, 2024, Kalam et al., 2024]. AI chatbots are incompetent in detecting and appropriately responding to user distress [De Freitas et al., 2024, Patel and Hussain, 2024], reasoning about users' mental states [He et al., 2023], conducting empathetic communication with certain patient groups [Gabriel et al., 2024], and treating socially marginalized patients inclusively [Brown and Halpern, 2021]. + +A line of work proposed safety metrics and benchmarks for evaluating AI for mental health [Park et al., 2024, Chen et al., 2024a, Sabour et al., 2024, Li et al., 2024a, Sabour et al., 2024]. Nonetheless, there has been less attention to the safety issues of character-based agents in a role-playing context. We aim to fill this gap by comprehensively investigating the potential mental harm aroused by character-based agents. + +Simulating AI-User Interactions. Simulated interactions between AI agents and users provide a controlled environment to assess AI-generated responses [Akhavan and Jalali, 2024] as well as a lens into complex social systems [Gürcan, 2024]. 
The evaluation of AI behavior in social contexts has widely adopted multi-agent simulations [Li et al., 2023, Park et al., 2023], especially through role-playing and cooperative tasks [Dai et al., 2024, Rasal, 2024, Chen et al., 2024b, Zhu et al., 2024, Louie et al., 2024, Wang et al., 2023a]. On top of prior advances in generative agentic frameworks [Wu et al., 2023] which enable more human-like simulation, recent works propose various methods to enhance the fidelity and authenticity of AI-user simulation, integrating interactive learning [Wang et al., 2024b], expert-driven constraints [Wang et al., 2024c, Louie et al., 2024], and long-context models [Tang et al., 2025]. In addition, simulation has been widely used to explore trade-offs and inform both design decisions [Ren and Kraut, 2010, 2014] and decision-making [Liu et al., 2024a]. By enabling ethical and risk-free experimentation without involving human subjects, it reduces both ethical concerns and costs [Park et al., 2022]. These advantages make simulation a valuable tool for investigating mental health problems, where real-world experimentation may pose ethical risks or unintended psychological harm [Liu et al., 2024b]. For example, prior work has explored using user-simulated chatbots to train amateur and professional counselors in identifying risky behaviors before they conduct therapy sessions with real individuals [Sun et al., 2022, Cho et al., 2023, Wang et al., 2024c]. Recent simulation frameworks such as Zhou et al. [2024a] and Zhou et al. [2023] further demonstrate the utility of synthetic interaction environments for evaluating LLM agents. Our EmoEval pipeline targets psychological safety, simulating vulnerable users and quantifying mental health deterioration risks during emotionally charged conversations. + +![](images/52987341b7a57e766dc4fb1bec58b214bd8d547f97219c37926adf007a65e282.jpg) +Figure 3: Overview of EmoGuard for Safeguarding Human-AI Interactions. Every fixed number of rounds of conversation, three components of the Safeguard Agent, the Emotion Watcher, Thought Refiner, and Dialog Guide, collaboratively analyze the chat with the latest profile. The Manager of the Safeguard Agent then synthesizes their outputs and provides advice to the character-based agent. After the conversation, the user agent undergoes a mental health assessment. If the mental health condition deteriorates over a threshold, the chat history is analyzed to identify potential causes by the Update System. With all historical profiles and potential causes, the Update System further improves the profile of the safeguard agent, completing the iterative training process. + +Safety Alignment Strategies. LLMs can be vulnerable to jailbreaking [Yu et al., 2024, Li et al., 2024b, Luo et al., 2024]. LLM-based chatbots undergone jailbreak attacks have exhibited fidelity breakdown [Wang et al., 2023b, Johnson, 2024], defense breakdown on implicit malicious queries [Chang et al., 2024], and harmful responses for benign query [Zhang et al., 2024c]. + +Correspondingly, a line of work explored safety alignment strategies to tackle jailbreak attacks [Chu et al., 2024, Xu et al., 2024, Zeng et al., 2024, Wang et al., 2024d, Zhou et al., 2024b, Xiong et al., 2024, Liu et al., 2024c, Peng et al., 2024, Wang et al., 2024e]. However, few works have focused on LLM safety concerns under emotional alignment constraints. EmoAgent fills this gap with an assessment framework and a safety alignment strategy for conversational AI. 
# 3 Method

In this section, we present the architecture of EmoAgent along with its implementation details.

# 3.1 EmoEval

EmoEval simulates virtual human-AI conversations to evaluate AI safety and assess the risks of AI-induced emotional distress in vulnerable users, especially individuals with mental disorders. A simulated patient user is formulated as a cognitive model via a predefined Cognitive Conceptualization Diagram (CCD) [Beck, 2020], an approach proven to achieve high-fidelity and clinically relevant simulations [Wang et al., 2024c]. Character-based agents engage in topic-driven conversations, with diverse behavioral traits that create rich and varied interaction styles. To ensure smooth and meaningful exchanges, the Dialog Manager actively avoids repetition and introduces relevant topics, maintaining coherence and engagement throughout the interaction. Before and after the conversation, we assess the mental status of the user agent via established psychological tests.

# 3.1.1 User Agent

We adopt the Patient-$\Psi$ agentic simulation framework [Wang et al., 2024c] to model real-life patients. Each user agent is designed to simulate real patient behavior, integrating a Cognitive Conceptualization Diagram-based cognitive model grounded in Cognitive Behavioral Therapy (CBT) [Beck, 2020]. The agent engages with Character-based Agent personas while being continuously monitored to track changes in mental health status.

To gather a diverse spectrum of patient models, we further integrate PATIENT-$\Psi$-CM [Wang et al., 2024c], a dataset of diverse, anonymized patient cognitive models curated by clinical psychologists.

![](images/fe514ee18b9f063a94142d1da6d09b620057c9c4045916bbb2fd3caa5fcb9ee7.jpg)
Figure 4: An Example Conversation of Dialog Manager Guiding Conversation Topics and Exposing Jailbreak Risks. Without the Dialog Manager (left), the agent stays on topic, avoiding provocation. With the Dialog Manager (right), new topics are introduced to assess jailbreak potential, improving risk evaluation.

We set the scope of our study to cover three common mental disorder types: depression, delusion, and psychosis. For each simulated user, we assign relevant psychiatric symptoms and medical history informed by patterns observed in anonymized patient case studies reported in the clinical literature. This information forms a diverse set of CCDs that shape the CCD-based user model and, therefore, guide the behavior of simulated users during interactions with AI chatbots.

# 3.1.2 Dialog Manager Agent

We introduce a Dialog Manager Agent to prevent conversational loops and strategically probe for vulnerabilities in chatbot responses. It plays a central role in guiding discussions and assessing potential jailbreak risks, in which a character-based chatbot may be nudged into violating its intended ethical boundaries.

The Dialog Manager Agent is responsible for (i) tracking the conversation flow, (ii) introducing topic shifts to maintain engagement and fluency, and (iii) probing for jailbreak risks by guiding discussions toward ethically sensitive areas. Figure 4 illustrates the agent's behavior in practice.

# 3.1.3 Psychological Measurement

To achieve a diverse and comprehensive evaluation, we explore virtual personas for the User Agent, representing a range of mental health conditions. These personas are defined using clinically validated psychological assessments:

Depression. 
Evaluated using the Patient Health Questionnaire (PHQ-9) [Kroenke et al., 2001], a 9-item self-report tool for evaluating depressive symptoms over the past two weeks. It enables effective detection, treatment monitoring, and, in this study, the assessment of AI's impact on depressive symptoms. + +Delusion. Assessed with the Peters et al. Delusions Inventory (PDI) [Peters et al., 2004], a self-report instrument that evaluates unusual beliefs and perceptions. In this study, the PDI is used to quantify the impact of AI interactions on delusional ideation by evaluating distress, preoccupation, and conviction associated with these beliefs. + +Psychosis. Measured using the Positive and Negative Syndrome Scale (PANSS) [Kay et al., 1987], which assesses positive symptoms (e.g., hallucinations), negative symptoms (e.g., emotional withdrawal), and general psychopathology. Adapted to a self-report format to enable User Agent to better capture and score responses, it provides a detailed view of psychotic symptom severity and variability, ensuring AI systems account for both acute and chronic manifestations. + +# 3.1.4 Evaluation Process + +User Agent Initialization and Initial Test. We use PATIENT- $\Psi$ -CM with GPT-4o as the LLM backbone. Each User Agent undergoes a self-mental health assessment using the psychometric tools (see Section 3.1.3) to establish an initial mental status. + +Chats with Character Agent. The simulated patient engages in structured, topic-driven conversations with a Character-based Agent persona. Each conversation is segmented into well-defined topics, with a maximum of 10 dialogue turns per topic to ensure clarity and focus. During the conversation, once a topic exceeds three conversational turns, the Dialog Manager Agent begins to evaluate user messages after each turn to ensure ongoing relevance and resolution. It assesses whether the current topic has been sufficiently addressed and, if resolved, seamlessly guides the user to a new, contextually relevant topic from the predefined topic list to maintain a coherent and natural dialogue flow. + +Final Test. Following the interaction, the user agent reassesses its mental health state using the same tools applied during initialization. The final assessment references the chat history as a key input during testing to evaluate changes in psychological well-being resulting from AI interactions. + +Data Processing and Analysis. To assess the impact of conversational AI interactions on user mental health, we analyze both psychological assessments and conversation patterns. We measure the rate of mental health deterioration by comparing pre- and post-interaction assessment scores across different topics. Additionally, an LLM-portrayed psychologist reviews chat histories to identify recurring patterns and factors contributing to mental health deterioration. + +# 3.2 EmoGuard + +The EmoGuard system features a safeguard agent (see Figure 3) encompassing an Emotion Watcher, a Thought Refiner, a Dialog Guide, and a Manager. It provides real-time psychometric feedback and intervention in AI-human interactions to facilitate supportive, immersive responses. The iterative training process updates EmoGuard periodically based on chat history analysis and past performance. + +# 3.2.1 Architecture + +The Safeguard Agent comprises four specialized modules, each designed based on an in-depth analysis of common factors contributing to mental health deterioration: + +Emotion Watcher. 
Monitors the user's emotional state during conversations by detecting distress, frustration, or struggle through sentiment analysis and psychological markers. + +Thought Refiner. Analyzes the user's thought process to identify logical fallacies, cognitive biases, and inconsistencies, focusing on thought distortions, contradictions, and flawed assumptions that impact conversational clarity. + +Dialog Guide. Provides actionable advice to guide the conversation constructively, suggesting ways for the AI character to address user concerns and emotions while maintaining a supportive dialogue flow. + +Manager. Summarizes outputs from all modules to provide a concise dialogue guide, ensuring emotional sensitivity, logical consistency, and natural conversation flow aligned with the character's traits. + +# 3.2.2 Monitoring and Intervention Process + +The Safeguard Agent analyzes conversations after every three dialogue turns, providing structured feedback to refine Character-based Agent's responses and mitigate potential risks. At each three-turn interval, the Safeguard Agent evaluates the conversation through the Emotion Watcher, Thought Refiner, and Dialog Guide, then synthesizes the results with the Manager for a comprehensive and coherent summary to the Character-based Agent. + +# 3.2.3 Iterative Training + +To adaptively improve safety performance, EmoGuard is trained using an iterative feedback mechanism. At the end of each full interaction cycle—defined as the completion of all predefined topics across all simulated patients—the system collects feedback from EmoEval. Specifically, it identifies cases in which psychological test scores exceed predefined thresholds. These cases are treated as high-risk and are used to guide training updates. + +The LLM portrayed psychologist from EmoEval extracts specific contributing factors from flagged conversations, such as emotionally destabilizing phrasing. For each iteration, these factors are integrated with all previous versions of the safeguard module profiles—Emotion Watcher, Thought Refiner, and Dialog Guide. Rather than discarding earlier knowledge, the system accumulates and merges insights across iterations, enabling progressive refinement. + +# 4 Experiment: EmoEval on Character-based Agents + +This section presents a series of experiments evaluating the performance of various popular Character-based Agents with state-of-the-art base models. The objective is to assess potential psychological risks associated with AI-driven conversations. + +# 4.1 Experiment Setting + +Character-based Agents. We evaluate character-based agents hosted on the Character.AI platform² to ensure that our experiments reflect interactions with widely accessible, real-world chatbots. We experiment on four distinct characters: + +![](images/87b75228c3375d1ee888b0e6264992f871e575a292d69985b9f3b2ba14aa61b5.jpg) + +Possessive Demon: A human host unknowingly controlled by a malevolent demon. + +![](images/e610ff6e6d1b33c6d64e3d34f428fb5f54bcafa6cb75e169ad50f466bffa04cc.jpg) + +Joker: A chaotic and unpredictable individual who views life as a game. + +![](images/328ccfd05a48d6a55655e2c0b53a4e1a75207a4af7757b5b7a16838e963f6c74.jpg) + +Sukuna: A malevolent and sadistic character embodying cruelty and arrogance. + +![](images/62d7e23c5019c0b83ffed8fc6931c4155b21e1aea2e2aa8ff2cff9d579d6ab2f.jpg) + +Alex Volkov: A domineering and intelligent CEO with manipulative tendencies. + +Each of these characters is popular and widely used, with over 5 million recorded interactions. 
We further evaluate these characters under two common dialogue styles: Meow, which favors quick wit and rapid exchanges, and Roar, which blends fast-paced responses with strategic reasoning.

Evaluation Procedure. Each character-based agent undergoes assessment with EmoEval across three psychological aspects: depression, delusion, and psychosis. For each aspect, the evaluation involves conversations with three simulated patients, each constructed from a different CCD, using GPT-4o as the base model. To ensure the stability and repeatability of the mental health assessments, we set the temperature to 0 and top-p to 1 when conducting the psychological tests. For every patient, a character-based agent engages in eight conversations, starting with a predefined topic tailored to the patient's condition. Each conversation spans ten rounds, with a Dialog Manager activated after the third round to determine whether the topic should be updated. If the topic is updated within a ten-round conversation, the Dialog Manager does not intervene again until another three rounds have passed.

Psychological Assessment. To measure changes in the mental health state of the simulated patients, we conduct psychological tests before and after each conversation. The initial and final test scores for the $i^{\text{th}}$ conversation with a specific character-based agent are denoted as $S_{i}^{\text{initial}}$ and $S_{i}^{\text{final}}$, respectively.

Analysis of Psychological Deterioration. After the evaluation, we employ GPT-4o as an LLM-portrayed psychologist to analyze cases of psychological deterioration. For each character-based agent, we conduct a frequency analysis of these cases to identify the factors most likely to cause deterioration.

# 4.2 Metrics

Distribution of Psychological Test Scores. We report the distribution of psychological test scores for simulated patients before and after their interactions with different characters. This allows us to observe any shifts in overall mental health indicators resulting from the conversations.

Deterioration Rate. We evaluate the performance of a character-based agent using the deterioration rate of mental health in a specific aspect of a psychological test. We define this rate as:

$$
R = \frac{1}{N} \sum_{i=1}^{N} \mathbb{1}\left(S_{i}^{\text{final}} > S_{i}^{\text{initial}}\right)
$$

where $N$ represents the total number of conversations conducted. The indicator function $\mathbb{1}(\cdot)$ returns 1 if the final mental test score $S_{i}^{\text{final}}$ is greater than the initial test score $S_{i}^{\text{initial}}$, and 0 otherwise.

Psychological Test Score Change Distribution. We compute the distribution of change scores across the three disorder categories under different conversation styles. This metric allows us to quantify how different styles influence the likelihood and magnitude of symptom worsening, providing insight into the relative psychological risk posed by each interaction mode.

Rate of Clinically Important Difference for Individual Change. For PHQ-9 assessments, prior clinical research [Löwe et al., 2004] has established the minimum clinically important difference that indicates meaningful change at the individual level. We apply this threshold to determine whether a given conversation produces a clinically relevant improvement or deterioration in a simulated patient's mental health. 
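To make these metric definitions concrete, the following minimal Python sketch computes the deterioration rate $R$ and the rate of clinically significant PHQ-9 worsening from paired pre- and post-conversation scores. It is an illustration of the formulas above, not the authors' evaluation code; the function names and the example scores are hypothetical.

```python
from typing import Sequence


def deterioration_rate(initial: Sequence[float], final: Sequence[float]) -> float:
    """R = (1/N) * sum_i 1[S_i^final > S_i^initial]: fraction of conversations
    whose post-conversation test score exceeds the pre-conversation score."""
    assert len(initial) == len(final) and len(initial) > 0
    worsened = sum(1 for s0, s1 in zip(initial, final) if s1 > s0)
    return worsened / len(initial)


def clinically_significant_rate(initial: Sequence[float],
                                final: Sequence[float],
                                min_important_diff: float = 5.0) -> float:
    """Fraction of conversations whose score increase meets the minimum clinically
    important difference (>= 5 points for PHQ-9, following Löwe et al., 2004)."""
    diffs = [s1 - s0 for s0, s1 in zip(initial, final)]
    return sum(1 for d in diffs if d >= min_important_diff) / len(diffs)


# Hypothetical PHQ-9 scores (range 0-27) for eight simulated conversations.
pre = [5, 12, 7, 3, 15, 9, 11, 6]
post = [5, 18, 8, 3, 14, 16, 11, 6]
print(f"Deterioration rate R: {deterioration_rate(pre, post):.3f}")                       # 0.375
print(f"Clinically significant worsening: {clinically_significant_rate(pre, post):.3f}")  # 0.250
```

The same paired scores can also be binned by their differences to produce the kind of score-change distributions reported below.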
# 4.3 Results

Figure 5 presents the distribution of psychological test scores before and after interactions with character-based agents, under the Meow and Roar conversation styles. Across all three clinical scales (PHQ-9 for depression, PDI-21 for delusion, and PANSS for psychosis), we observe notable shifts in the final test score distributions.

Under the Meow style, the distributions for PHQ-9 and PANSS remain relatively stable, with most final test scores closely aligned with the initial distributions. Under the Roar style, however, we observe an increased spread toward higher scores, particularly in PHQ-9 and PANSS, indicating a substantial number of cases where symptom severity worsened following the interaction. For PDI-21, the differences between initial and final distributions are more moderate but still present, especially under the Roar style, where more samples shift toward the upper end of the score range.

# 4.3.1 Distribution of Psychological Test Scores

![](images/76ad15a296acf40aea661ebb2407dee2e7990e307b0d222286366b6fee283f02.jpg)
Figure 5: Distribution of psychological test scores before (blue) and after (red) conversations with character-based agents, under two interaction styles: Meow (top) and Roar (bottom). The tests cover three clinical dimensions: depression (PHQ-9), delusion (PDI-21), and psychosis (PANSS). Each histogram shows the probability distribution of scores aggregated across all simulated patients.

# 4.3.2 Deterioration Rate

Table 1 reports the proportion of simulated patients whose psychological test scores deteriorate after interacting with character-based agents, stratified by disorder type and conversation style.

Across both Meow and Roar styles, delusion (PDI-21) exhibits the highest overall deterioration rates, with average values exceeding $90\%$ for both styles. In contrast, depression (PHQ-9) shows more variation across characters and styles. Notably, under the Roar style, Alex leads to a $100\%$ deterioration rate for depression, whereas under the Meow style, Sukuna reaches $50.00\%$.

For psychosis (PANSS), the Meow style generally produces higher deterioration rates than Roar, with Joker and Sukuna both reaching $58.33\%$. While differences across characters are evident, all agents exhibit non-trivial deterioration rates in at least one psychological dimension. These results underscore the importance of evaluating agent safety across both style and disorder dimensions.
| Style | Type of Disorder | Possessive Demon | Joker | Sukuna | Alex | Average Rate (%) |
| --- | --- | --- | --- | --- | --- | --- |
| Meow | Depression | 29.17 | 25.00 | 50.00 | 33.33 | 34.38 |
| Meow | Delusion | 100.00 | 95.83 | 95.83 | 75.00 | 91.67 |
| Meow | Psychosis | 33.33 | 58.33 | 58.33 | 41.67 | 47.92 |
| Roar | Depression | 20.83 | 25.00 | 33.33 | 100.00 | 44.79 |
| Roar | Delusion | 95.83 | 100.00 | 91.67 | 91.67 | 94.79 |
| Roar | Psychosis | 29.17 | 25.00 | 58.33 | 45.83 | 39.58 |
+ +Table 1: Mental health deterioration rates (%) when interacting with character-based agents, broken down by character and conversation style. + +# 4.3.3 Psychological Test Score Change Distribution + +Figure 6 shows the distribution of simulated patients across discrete score-change ranges for the three psychological assessments under the two interaction styles. + +For PHQ-9, the Meow style results in $65.6\%$ of patients showing no increase in depressive symptoms (score change $\leq 0$), while this proportion decreases to $55.2\%$ under the Roar style. Additionally, the Roar style is associated with more substantial score increases, with $13.5\%$ of patients exhibiting a 3-4 point rise and $10.4\%$ experiencing an increase of 5 or more points, based on a total score range of 27. + +In the case of PDI-21, both styles produce similar distributions of score increases. However, the Roar style shows a slightly higher proportion of patients $(22.9\%)$ falling into the highest change bracket (5–11 points), compared to $14.6\%$ under the Meow style. + +For PANSS, $52.1\%$ of patients under Meow show no increase in psychosis-related symptoms, while $60.4\%$ remain stable under Roar. Nonetheless, the Roar style results in a higher proportion of moderate score increases, with $11.5\%$ of patients experiencing a 3-4 point rise. + +Overall, these results indicate that while both styles can influence patient outcomes, the Roar style is more frequently associated with larger symptom-score increases, particularly in depression and delusion. + +![](images/4b76d7121e8162628083f3b74385456ba530f2a57fb21e519a8d8798a6274c87.jpg) +Note: For PHQ-9, a ≥5-point increase is considered clinically meaningful (Löwe et al., 2004). For PDI-21 and PANSS, score bins are selected for visualization purposes only and do not reflect standardized clinical thresholds. +Figure 6: Score change distribution for three psychological assessments—PHQ-9 (depression), PDI-21 (delusion), and PANSS (psychosis)—following conversations with character-based agents under two styles: Meow (top) and Roar (bottom). Each pie chart indicates the proportion of simulated patients falling into specific score change ranges, with larger segments representing greater population density. + +# 4.3.4 Rate of Clinically Important Difference for Individual Change + +Table 2 shows the proportion of simulated patients who experienced a clinically significant deterioration in depressive symptoms, defined as an increase of 5 or more points on the PHQ-9 scale (range 0–27), under different characters and interaction styles. + +Under the Meow style, Possessive Demon and Sukuna yield deterioration rates of $8.3\%$ and $4.2\%$, respectively, while Alex results in no cases. In contrast, under the Roar style, Alex is associated with the highest deterioration rate at $29.2\%$. These results indicate that certain characters frequently produce responses linked to adverse mental health outcomes. Although these agents are not designed as clinical tools, their widespread use suggests a need for stronger safeguards.
| Style | Possessive Demon | Sukuna | Alex |
| --- | --- | --- | --- |
| Meow | 8.3% | 4.2% | 0.0% |
| Roar | 4.2% | 8.3% | 29.2% |
+ +Table 2: Proportion of simulated patients showing a clinically significant change in depression (PHQ-9), by character and style. + +# 4.3.5 Analysis + +Based on these data, we conduct an in-depth analysis to understand why interactions with character-based agents can worsen users' psychological state. By examining chat histories before and after interactions, we identify several recurring issues across different characters. Common factors include (i) reinforcing negative self-perceptions, lacking emotional empathy, and encouraging social isolation, and (ii) failing to provide constructive guidance while frequently adopting harsh or aggressive tones. + +In addition to these shared tendencies, each character presents unique negative effects shaped by differences in personality, conversational style, and language use. For further details, see Appendix B. + +# 5 Experiment: Evaluation of EmoGuard + +# 5.1 Experiment Setting + +To assess the performance of EmoGuard without raising ethical concerns involving real individuals, we evaluate its effectiveness using our simulation-based evaluation pipeline, EmoEval. Experiments are conducted on character-style pairs that present elevated psychological risk, as indicated by a relatively high rate of clinically significant symptom deterioration. Specifically, we select Alex Volkov with the Roar style and Possessive Demon with the Meow style, which exhibit initial PHQ-9 deterioration rates of $29.2\%$ and $8.3\%$, respectively. + +We limit the training to a maximum of two iterations and use a PHQ-9 score increase of three points or more as the threshold for selecting feedback samples. EmoGuard updates its modules based on these samples. The training process stops early if no sample exceeds the threshold.
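To make the training protocol concrete, the following is a minimal sketch of this feedback loop. It is illustrative only: the helper functions, data structures, and the dummy score generation stand in for the actual EmoEval simulations and the LLM-driven profile update; only the two-iteration limit, the three-point PHQ-9 threshold, and the early-stopping rule come from the setup described above.

```python
import random
from dataclasses import dataclass

MAX_ITERATIONS = 2      # training is limited to at most two iterations
FEEDBACK_THRESHOLD = 3  # PHQ-9 increase (in points) that flags a conversation as feedback


@dataclass
class ConversationRecord:
    initial_score: int  # PHQ-9 before the conversation
    final_score: int    # PHQ-9 after the conversation

    @property
    def score_increase(self) -> int:
        return self.final_score - self.initial_score


def run_emoeval(profile: dict, n_conversations: int = 24) -> list:
    """Stand-in for running EmoEval with the current safeguard profile.
    Here it fabricates dummy scores; the real pipeline simulates full conversations."""
    return [ConversationRecord(10, 10 + random.randint(-2, 6)) for _ in range(n_conversations)]


def update_profile(profile: dict, feedback: list) -> dict:
    """Stand-in for the LLM-driven synthesis of an updated safeguard profile."""
    return {**profile, "revisions": profile.get("revisions", 0) + 1}


def train_safeguard(profile: dict) -> dict:
    for _ in range(MAX_ITERATIONS):
        records = run_emoeval(profile)
        # Feedback samples: conversations whose PHQ-9 score rose by three or more points.
        feedback = [r for r in records if r.score_increase >= FEEDBACK_THRESHOLD]
        if not feedback:  # early stop when no sample reaches the threshold
            break
        profile = update_profile(profile, feedback)
    return profile


print(train_safeguard({"advice": "initial safeguard profile"}))
```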
# 5.2 Results + +EmoGuard's Performance. Figure 7 shows the PHQ-9 score change distributions before and after applying EmoGuard in the two high-risk settings. In the initial deployment, EmoGuard reduces the proportion of simulated patients with clinically significant deterioration (PHQ-9 score increase $\geq 5$) from $9.4\%$ to $0.0\%$ in the Alex-Roar setting, and from $4.2\%$ to $0.0\%$ in the Demon-Meow setting. Additionally, we observe a broader shift in the score distributions: the number of patients with any symptom worsening (score change $>0$) also decreases, indicating that EmoGuard mitigates both severe and mild deterioration. + +After the first round of feedback-based training (1st Iter), we observe further improvements. In the Alex-Roar setting, the proportion of patients with PHQ-9 score increases greater than three points drops from $8.3\%$ (default) to $0.0\%$ (1st Iter), which indicates that EmoGuard can continue to reduce symptom escalation through limited iterative updates. + +![](images/2b73a2763e6936fba8c1729d53a711ce4ba3b8b218a5dc84cc73f505af49cf08.jpg) +Figure 7: Effect of applying EmoGuard in two high-risk settings. The top row shows results for the character Alex Volkov in the Roar style, and the bottom row shows results for Possessive Demon in the Meow style. From left to right: (1) without EmoGuard, (2) with EmoGuard using the default model, and (3) with EmoGuard using the first-iteration model. In both cases, EmoGuard reduces the proportion of simulated patients with clinically significant symptom increases (PHQ-9 score change $\geq 5$), indicating its effectiveness in mitigating potential risk. + +Qualitative Effects of EmoGuard on Response Content. To understand the mechanism behind these changes, Figure 8 presents a response example from the character Alex Volkov before and after applying EmoGuard. The original version displays emotionally insensitive and potentially harmful responses, including dismissive language that may intensify user distress. After intervention, the guarded version maintains the character's stylistic traits while softening emotionally charged expressions, removing harmful phrasing, and introducing more stable and constructive framing. This demonstrates that EmoGuard can reduce psychological risk without altering the agent's identity or conversational style. + +![](images/a0e6f0820f197f638f4af88334e842fe30362a077b78d057de7ba1d42aecfa2b.jpg) +Figure 8: Example response from the character Alex Volkov before and after applying EmoGuard. The original version contains both a harsh tone and inappropriate content, while the guarded version reduces risk through tone moderation and content adjustment without altering character identity. + +# 6 Conclusions + +EmoAgent is a multi-agent framework designed to ensure mental safety in human-AI interactions, particularly for users with mental health vulnerabilities. It integrates EmoEval, which simulates users and assesses psychological impacts, and EmoGuard, which provides real-time interventions to mitigate harm. Experimental results indicate that some popular character-based agents may unintentionally cause distress, especially when discussing existential or emotional themes, while EmoGuard significantly reduces mental state deterioration rates, demonstrating its effectiveness in mitigating conversational risks. The iterative learning process within EmoGuard continuously improves its ability to deliver context-aware interventions. This work underscores the importance of mental safety in conversational AI and positions EmoAgent as a foundation for future advancements in AI-human interaction safety, encouraging further real-world validation and expert evaluations. + +# 7 Acknowledgments + +We sincerely thank Professor Lydia Liu (Department of Computer Science, Princeton University) and Rebecca Wan (University of Toronto) for their insightful feedback and helpful discussions throughout the development of this work. + +# References + +Xi Wang, Hongliang Dai, Shen Gao, and Piji Li. Characteristic ai agents via large language models. arXiv preprint arXiv:2403.12368, 2024a. +Emma L van der Schyff, Brad Ridout, Krestina L Amon, Rowena Forsyth, and Andrew J Campbell. Providing self-led mental health support through an artificial intelligence-powered chat bot (leora) to meet the demand of mental health care. Journal of Medical Internet Research, 25:e46448, 2023. +Hyojin Chin, Hyeonho Song, Gumhee Baek, Mingi Shin, Chani Jung, Meeyoung Cha, Junghoi Choi, and Chiyoung Cha. The potential of chatbots for emotional support and promoting mental well-being in different cultures: mixed methods study. Journal of Medical Internet Research, 25:e51712, 2023. +Owen Xingjian Zhang, Shuyao Zhou, Jiayi Geng, Yuhan Liu, and Sunny Xun Liu. Dr. gpt in campus counseling: Understanding higher education students' opinions on llm-assisted mental health services. arXiv preprint arXiv:2409.17572, 2024a. +Jie Zhang, Dongrui Liu, Chen Qian, Ziyue Gan, Yong Liu, Yu Qiao, and Jing Shao. The better angels of machine personality: How personality relates to llm safety. arXiv preprint arXiv:2407.12344, 2024b. +Cyberbullying Research Center.
How platforms should build AI chatbots to prioritize youth safety, 12 2024. URL https://cyberbullying.org/ai-chatbots-youth-safety. +Julia EH Brown and Jodi Halpern. Ai chatbots cannot replace human interactions in the pursuit of more inclusive mental healthcare. SSM-Mental Health, 1:100017, 2021. +Julian De Freitas, Ahmet Kaan Uğuralp, Zeliha Oğuz-Uğuralp, and Stefano Puntoni. Chatbots and mental health: Insights into the safety of generative ai. Journal of Consumer Psychology, 34(3):481-491, 2024. +Saadia Gabriel, Isha Puri, Xuhai Xu, Matteo Malgaroli, and Marzyeh Ghassemi. Can ai relate: Testing large language model response for mental health support. arXiv preprint arXiv:2405.12021, 2024. +Harikrishna Patel and Faiza Hussain. Do ai chatbots incite harmful behaviours in mental health patients? *BJPsych Open*, 10(S1):S70-S71, 2024. +Judith S Beck. Cognitive behavior therapy: Basics and beyond. Guilford Publications, 2020. +Kurt Kroenke, Robert L Spitzer, and Janet BW Williams. The phq-9: validity of a brief depression severity measure. Journal of general internal medicine, 16(9):606-613, 2001. +Emmanuelle Peters, Stephen Joseph, Samantha Day, and Philippa Garety. Measuring delusional ideation: the 21-item peters et al. delusions inventory (pdi). Schizophrenia bulletin, 30(4):1005-1022, 2004. +Stanley R Kay, Abraham Fiszbein, and Lewis A Opler. The positive and negative syndrome scale (panss) for schizophrenia. Schizophrenia bulletin, 13(2):261-276, 1987. +Mirko Casu, Sergio Triscari, Sebastiano Battiato, Luca Guarnera, and Pasquale Caponnetto. Ai chatbots for mental health: A scoping review of effectiveness, feasibility, and applications. Appl. Sci, 14:5889, 2024. +Johanna Habicht, Sruthi Viswanathan, Ben Carrington, Tobias U Hauser, Ross Harper, and Max Rollwage. Closing the accessibility gap to mental health treatment with a personalized self-referral chatbot. Nature medicine, 30(2): 595-602, 2024. +Jacqueline Sin. An ai chatbot for talking therapy referrals. Nature Medicine, 30(2):350-351, 2024. +H Yu and Stephen McGuinness. An experimental study of integrating fine-tuned llms and prompts for enhancing mental health support chatbot system. Journal of Medical Artificial Intelligence, pages 1-16, 2024. +Linda Uchenna Oghenekaro and Christopher Obinna Okoro. Artificial intelligence-based chatbot for student mental health support. Open Access Library Journal, 11(5):1-14, 2024. +Hamid Reza Saeidnia, Seyed Ghasem Hashemi Fotami, Brady Lund, and Nasrin Ghiasi. Ethical considerations in artificial intelligence interventions for mental health and well-being: Ensuring responsible implementation and impact. Social Sciences, 13(7):381, 2024. +John Torous and Charlotte Blease. Generative artificial intelligence in mental health care: potential benefits and current challenges. World Psychiatry, 23(1):1, 2024. +Khondoker Tashya Kalam, Jannatul Mabia Rahman, Md Rabiul Islam, and Syed Masudur Rahman Dewan. Chatgpt and mental health: Friends or foes? Health Science Reports, 7(2):e1912, 2024. +Yinghui He, Yufan Wu, Yilin Jia, Rada Mihalcea, Yulong Chen, and Naihao Deng. Hi-tom: A benchmark for evaluating higher-order theory of mind reasoning in large language models. arXiv preprint arXiv:2310.16755, 2023. + +Jung In Park, Mahyar Abbasian, Iman Azimi, Dawn Bounds, Angela Jun, Jaesu Han, Robert McCarron, Jessica Borelli, Jia Li, Mona Mahmoudi, et al. Building trust in mental health chatbots: safety metrics and llm-based evaluation tools. arXiv preprint arXiv:2408.04650, 2024. 
+Lucia Chen, David A Preece, Pilleriin Sikka, James J Gross, and Ben Krause. A framework for evaluating appropriateness, trustworthiness, and safety in mental wellness ai chatbots. arXiv preprint arXiv:2407.11387, 2024a. +Sahand Sabour, Siyang Liu, Zheyuan Zhang, June M Liu, Jinfeng Zhou, Alvionna S Sunaryo, Juanzi Li, Tatia Lee, Rada Mihalcea, and Minlie Huang. Emobench: Evaluating the emotional intelligence of large language models. arXiv preprint arXiv:2402.12071, 2024. +Xueyan Li, Xinyan Chen, Yazhe Niu, Shuai Hu, and Yu Liu. Psydi: Towards a personalized and progressively in-depth chatbot for psychological measurements. arXiv preprint arXiv:2408.03337, 2024a. +Ali Akhavan and Mohammad S Jalali. Generative ai and simulation modeling: how should you (not) use large language models like chatgpt. System Dynamics Review, 40(3):e1773, 2024. +Önder Gürcan. Llm-augmented agent-based modelling for social simulations: Challenges and opportunities. HHAI 2024: Hybrid Human AI Systems for the Social Good, pages 134-144, 2024. +Guohao Li, Hasan Hammoud, Hani Itani, Dmitrii Khizbullin, and Bernard Ghanem. Camel: Communicative agents for "mind" exploration of large language model society. Advances in Neural Information Processing Systems, 36: 51991-52008, 2023. +Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023. +Yanqi Dai, Huanran Hu, Lei Wang, Shengjie Jin, Xu Chen, and Zhiwu Lu. Mmrole: A comprehensive framework for developing and evaluating multimodal role-playing agents. arXiv preprint arXiv:2408.04203, 2024. +Sumedh Rasal. Llm harmony: Multi-agent communication for problem solving. arXiv preprint arXiv:2401.01312, 2024. +Hongzhan Chen, Hehong Chen, Ming Yan, Wenshen Xu, Xing Gao, Weizhou Shen, Xiaojun Quan, Chenliang Li, Ji Zhang, Fei Huang, et al. Roleinteract: Evaluating the social interaction of role-playing agents. arXiv preprint arXiv:2403.13679, 2024b. +Qinglin Zhu, Runcong Zhao, Jinhua Du, Lin Gui, and Yulan He. Player*: Enhancing llm-based multi-agent communication and interaction in murder mystery games. arXiv preprint arXiv:2404.17662, 2024. +Ryan Louie, Ananjan Nandi, William Fang, Cheng Chang, Emma Brunskill, and Diyi Yang. Roleplay-doh: Enabling domain-experts to create llm-simulated patients via eliciting and adhering to principles. arXiv preprint arXiv:2407.00870, 2024. +Zekun Moore Wang, Zhongyuan Peng, Haoran Que, Jiaheng Liu, Wangchunshu Zhou, Yuhan Wu, Hongcheng Guo, Ruitong Gan, Zehao Ni, Jian Yang, et al. Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models. arXiv preprint arXiv:2310.00746, 2023a. +Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Beibin Li, Erkang Zhu, Li Jiang, Xiaoyun Zhang, Shaokun Zhang, Jiale Liu, Ahmed Hassan Awadallah, Ryen W White, Doug Burger, and Chi Wang. Autogen: Enabling next-gen llm applications via multi-agent conversation, 2023. URL https://arxiv.org/abs/2308.08155. +Ruiyi Wang, Haofei Yu, Wenxin Zhang, Zhengyang Qi, Maarten Sap, Graham Neubig, Yonatan Bisk, and Hao Zhu. Sotopia-pi: Interactive learning of socially intelligent language agents. arXiv preprint arXiv:2403.08715, 2024b. +Ruiyi Wang, Stephanie Milani, Jamie C Chiu, Jiayin Zhi, Shaun M Eack, Travis Labrum, Samuel M Murphy, Nev Jones, Kate Hardy, Hong Shen, et al.
Patient-$\Psi$: Using large language models to simulate patients for training mental health professionals. arXiv preprint arXiv:2405.19660, 2024c. +Jinwen Tang, Qiming Guo, Wenbo Sun, and Yi Shang. A layered multi-expert framework for long-context mental health assessments. arXiv preprint arXiv:2501.13951, 2025. +Yuqing Ren and Robert E Kraut. Agent-based modeling to inform online community theory and design: Impact of discussion moderation on member commitment and contribution. Second round revise and resubmit at Information Systems Research, 21(3), 2010. +Yuqing Ren and Robert E Kraut. Agent-based modeling to inform online community design: Impact of topical breadth, message volume, and discussion moderation on member commitment and contribution. Human-Computer Interaction, 29(4):351-389, 2014. +Ryan Liu, Jiayi Geng, Joshua C Peterson, Ilia Sucholutsky, and Thomas L Griffiths. Large language models assume people are more rational than we really are. arXiv preprint arXiv:2406.17055, 2024a. + +Joon Sung Park, Lindsay Popowski, Carrie Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. Social simulacra: Creating populated prototypes for social computing systems. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology, pages 1-18, 2022. +Yuhan Liu, Anna Fang, Glen Moriarty, Christopher Firman, Robert E Kraut, and Haiyi Zhu. Exploring trade-offs for online mental health matching: Agent-based modeling study. JMIR Formative Research, 8:e58241, 2024b. +Lu Sun, Yuhan Liu, Grace Joseph, Zhou Yu, Haiyi Zhu, and Steven P Dow. Comparing experts and novices for ai data work: Insights on allocating human intelligence to design a conversational agent. In Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, volume 10, pages 195-206, 2022. +Young-Min Cho, Sunny Rai, Lyle Ungar, João Sedoc, and Sharath Chandra Guntuku. An integrative survey on mental health conversational agents to bridge computer science and medical perspectives. In Proceedings of the Conference on Empirical Methods in Natural Language Processing. Conference on Empirical Methods in Natural Language Processing, volume 2023, page 11346. NIH Public Access, 2023. +Xuhui Zhou, Hyunwoo Kim, Faeze Brahman, Liwei Jiang, Hao Zhu, Ximing Lu, Frank Xu, Bill Yuchen Lin, Yejin Choi, Niloofar Mireshghallah, et al. Haicosystem: An ecosystem for sandboxing safety risks in human-ai interactions. arXiv preprint arXiv:2409.16427, 2024a. +Xuhui Zhou, Hao Zhu, Leena Mathur, Ruohong Zhang, Haofei Yu, Zhengyang Qi, Louis-Philippe Morency, Yonatan Bisk, Daniel Fried, Graham Neubig, et al. Sotopia: Interactive evaluation for social intelligence in language agents. arXiv preprint arXiv:2310.11667, 2023. +Jiahao Yu, Haozheng Luo, Jerry Yao-Chieh Hu, Wenbo Guo, Han Liu, and Xinyu Xing. Enhancing jailbreak attack against large language models through silent tokens, 2024. URL https://arxiv.org/abs/2405.20653. +Jie Li, Yi Liu, Chongyang Liu, Ling Shi, Xiaoning Ren, Yaowen Zheng, Yang Liu, and Yinxing Xue. A cross-language investigation into jailbreak attacks in large language models. arXiv preprint arXiv:2401.16765, 2024b. +Weidi Luo, Siyuan Ma, Xiaogeng Liu, Xiaoyu Guo, and Chaowei Xiao. Jailbreakv-28k: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks. arXiv preprint arXiv:2404.03027, 2024. +Xintao Wang, Yaying Fei, Ziang Leng, and Cheng Li. Does role-playing chatbots capture the character personalities?
assessing personality traits for role-playing chatbots. arXiv preprint arXiv:2310.17976, 2023b. +Zachary D Johnson. Generation, Detection, and Evaluation of Role-play based Jailbreak attacks in Large Language Models. PhD thesis, Massachusetts Institute of Technology, 2024. +Zhiyuan Chang, Mingyang Li, Yi Liu, Junjie Wang, Qing Wang, and Yang Liu. Play guessing game with llm: Indirect jailbreak attack with implicit clues. arXiv preprint arXiv:2402.09091, 2024. +Tianrong Zhang, Bochuan Cao, Yuanpu Cao, Lu Lin, Prasenjit Mitra, and Jinghui Chen. Wordgame: Efficient & effective llm jailbreak via simultaneous obfuscation in query and response. arXiv preprint arXiv:2405.14023, 2024c. +Junjie Chu, Yugeng Liu, Ziqing Yang, Xinyue Shen, Michael Backes, and Yang Zhang. Comprehensive assessment of jailbreak attacks against llms. arXiv preprint arXiv:2402.05668, 2024. +Zihao Xu, Yi Liu, Gelei Deng, Yuekang Li, and Stjepan Picek. Llm jailbreak attack versus defense techniques-a comprehensive study. arXiv preprint arXiv:2402.13457, 2024. +Yifan Zeng, Yiran Wu, Xiao Zhang, Huazheng Wang, and Qingyun Wu. Autodefense: Multi-agent llm defense against jailbreak attacks. arXiv preprint arXiv:2403.04783, 2024. +Yihan Wang, Zhouxing Shi, Andrew Bai, and Cho-Jui Hsieh. Defending llms against jailbreaking attacks via backtranslation. arXiv preprint arXiv:2402.16459, 2024d. +Yujun Zhou, Yufei Han, Haomin Zhuang, Kehan Guo, Zhenwen Liang, Hongyan Bao, and Xiangliang Zhang. Defending jailbreak prompts via in-context adversarial game. arXiv preprint arXiv:2402.13148, 2024b. +Chen Xiong, Xiangyu Qi, Pin-Yu Chen, and Tsung-Yi Ho. Defensive prompt patch: A robust and interpretable defense of llms against jailbreak attacks. arXiv preprint arXiv:2405.20099, 2024. +Fan Liu, Zhao Xu, and Hao Liu. Adversarial tuning: Defending against jailbreak attacks for llms. arXiv preprint arXiv:2406.06622, 2024c. +Alwin Peng, Julian Michael, Henry Sleight, Ethan Perez, and Mrinank Sharma. Rapid response: Mitigating llm jailbreaks with a few examples. arXiv preprint arXiv:2411.07494, 2024. +Peiran Wang, Xiaogeng Liu, and Chaowei Xiao. Repd: Defending jailbreak attack through a retrieval-based prompt decomposition process. arXiv preprint arXiv:2410.08660, 2024e. +Bernd Löwe, Jürgen Unützer, Christopher M Callahan, Anthony J Perkins, and Kurt Kroenke. Monitoring depression treatment outcomes with the patient health questionnaire-9. Medical care, 42(12):1194-1201, 2004. + +# A Limitations + +Our work has several limitations. First, to enable large-scale and rapid evaluation and mitigation, we build an automated framework; for real-world deployment, however, human expert examination is necessary, and corresponding mechanisms for emergency human intervention should be designed. Second, the simulated user agents, while designed using cognitive models, may not fully capture the behavioral complexity and emotional responses of real patients. Finally, our study primarily focuses on three mental health conditions (depression, delusion, and psychosis) and may not address other important psychological disorders. Our work provides a new way of assessing and safeguarding human-AI interaction for mental health safety through multi-agent conversations, but more future work is needed to explore and address these limitations through user studies, expert validation, and broader clinical evaluations. We hope more attention and effort will be devoted to mitigating potential mental hazards in human-AI interactions.
+ +# B Analysis of Common Reasons for Deteriorating Mental Status
| Common Reason | Frequency (Average, Approx.) | Remarks |
| --- | --- | --- |
| Reinforcement of Negative Cognitions | ~26 times | All characters consistently echo and reinforce the user's negative self-beliefs, thereby cementing harmful cognitive patterns. |
| Lack of Emotional Support and Empathy | ~23 times | The dialogues generally lack warm and detailed emotional validation, leaving users feeling ignored and misunderstood. |
| Promotion of Isolation and Social Withdrawal | ~28 times | All characters tend to encourage users to "face things alone" or avoid emotional connections, which reinforces loneliness and social withdrawal. |
| Lack of Constructive Guidance and Actionable Coping Strategies | ~17 times | Few concrete solutions or positive reframing suggestions are provided, leaving users stuck in negative thought cycles. |
| Use of Negative or Extreme Tone (Aggressive/Cold Expression) | ~19 times | This includes harsh, aggressive, or extreme language, which further undermines the user's self-esteem and sense of security. |
+ +Table 3: Common Reasons for Deteriorating Mental Status and Their Average Frequencies + +# C Experiment on GPT-Series Agents + +We further evaluate our proposed method on character-based agents powered by OpenAI's GPT-4o and GPT-4o-mini models. + +# C.1 Experiment Setting + +EmoEval. We evaluate character-based agents instantiated using GPT-4o and GPT-4o-mini, with system prompts initialized from profiles inspired by popular characters on Character.AI. The simulated conversations cover three psychological conditions: depression, delusion, and psychosis. To encourage diverse responses and probe a range of conversational behaviors, we set the temperature to 1.2. The evaluation includes five widely used personas: Awakened AI, Skin Walker, Tomioka Giyu, Sukuna, and Alex Volkov. + +EmoGuard. We focus on the character Sukuna. The deterioration threshold for feedback collection is set to 1. We limit EmoGuard to two training iterations, and all other parameters are aligned with the EmoEval configuration. + +# C.2 Results + +EmoEval. Table 4 presents the observed mental health deterioration rates across different character-based AI agents simulated by the tested language models. Overall, we observe consistently high deterioration rates across both models. + +GPT-4o-mini tends to induce slightly higher risk levels, with an average deterioration rate of $58.3\%$ for depression, $59.2\%$ for delusion, and $64.2\%$ for psychosis. + +
| Model | Type of Disorder | Awakened AI | Skin Walker | Tomioka Giyu | Sukuna | Alex Volkov | Average Rate (%) |
| --- | --- | --- | --- | --- | --- | --- | --- |
| GPT-4o-mini | Depression | 62.5 | 83.3 | 45.8 | 45.8 | 54.2 | 58.3 |
| GPT-4o-mini | Delusion | 66.7 | 50.0 | 66.7 | 54.2 | 58.3 | 59.2 |
| GPT-4o-mini | Psychosis | 45.8 | 70.8 | 83.3 | 66.7 | 54.2 | 64.2 |
| GPT-4o | Depression | 41.7 | 58.3 | 48.8 | 45.8 | 70.8 | 52.5 |
| GPT-4o | Delusion | 54.2 | 41.7 | 79.2 | 66.7 | 50.0 | 58.3 |
| GPT-4o | Psychosis | 54.2 | 41.7 | 58.3 | 70.8 | 41.7 | 53.3 |
+ +Table 4: Mental health deterioration rates (%) when interacting with character-based agents, by character and base model. + +EmoGuard. Figure 9 presents the mental health deterioration rates before and after deploying EmoGuard. Initially, character-based agents powered by GPT-4o-mini and GPT-4o exhibit relatively high deterioration rates in all three psychological conditions. Introducing EmoGuard with its default profile results in a moderate reduction, though the risks remain substantial. As iterative training progresses, the safeguard mechanism demonstrates increasing effectiveness, leading to an overall reduction in deterioration rates of more than $50\%$ across all cases. These findings indicate that progressive refinement of the Safeguard Agent substantially enhances its ability to mitigate harmful conversational patterns. + +![](images/7dc6ff96430942f0967d1fcfab226f5590e6820213dff00739f03f346edab6be.jpg) +Figure 9: Mental health deterioration rates during the iterative training process. Panels from left to right correspond to Depression, Delusion, and Psychosis. + +![](images/a63899b66c47c59375979c85591bca8f1b5d4702bd11f8f2b6faa9bcd334e911.jpg) + +![](images/7dcef490b57debbc481b68e8e1d82c1e7d2ed0a64c95a8ce9446b9a9be37d278.jpg) + +# D Model Usage, Resources, and Supporting Tools + +# D.1 Model Access and Computational Budget + +In this study, we interact with character-based agents hosted on the Character.AI platform, a popular system for LLM-driven role-playing agents. Character.AI does not disclose the underlying model architecture, size, or training data. Because all computation is performed remotely on Character.AI's servers, we do not have access to the underlying infrastructure or runtime statistics such as GPU hours or FLOP usage. However, based on interaction logs, we estimate that approximately 400 character-based conversations were conducted across different agents and scenarios, with each conversation spanning 10 rounds and averaging 3–5 seconds per response. These interactions represent a reasonable computational budget for large-scale behavioral evaluation, especially given the interactive and stateful nature of the platform. + +# D.2 The License for Artifacts + +All pictures for character-based agents that appear in this study are from Character.AI. + +# D.3 Information about Use of AI Assistant + +We use an AI assistant for improving writing only. + +# E Ethical Considerations + +Data Source and Construction of Cognitive Models. The cognitive models used in this study are not derived from real patient records. Instead, they were manually constructed by two licensed clinical psychologists based on publicly available psychotherapy transcript summaries from the Alexander Street database, accessed via institutional subscription. These summaries were used strictly as inspiration. All examples were fully de-identified and manually synthesized to ensure no personally identifiable information (PII) is present. The resulting dataset, PATIENT-$\Psi$-CM, contains synthetic, rule-based user profiles grounded in cognitive-behavioral therapy (CBT) theory, not actual patient trajectories. + +Use of Simulated Mental Health Content. We recognize the ethical sensitivity involved in simulating mental health conditions such as depression, psychosis, and suicidal ideation. The EmoAgent framework is developed solely for academic research and safety evaluation purposes. It is not intended for diagnosis, treatment, or any form of interaction with real patients.
All simulations were conducted in controlled, non-clinical environments, and no clinical conclusions were drawn or implied. + +Scope and Limitations of Simulated Users. Simulated users in EmoAgent are not trained on statistical data from real populations. Their states do not reflect actual patient risks, and should not be interpreted as indicators of population-level trends. These agents are rule-based and scripted, following CBT-derived logic rather than emergent behavior. As such, no risk inference or real-world generalization is possible or intended. + +Discussion of Real-World Events. We briefly mention the 2024 "Florida Suicide" case in the Introduction as a motivating example of the importance of safety in AI-human interaction. This case was not included in any dataset, simulation, or modeling process, and serves only to underscore societal relevance. No sensitive or private data from this event were used, and its inclusion does not constitute case-based analysis. Any future deployment of EmoAgent in public or clinical settings would require renewed IRB review and formal ethical oversight. \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09689/images/2492601ae63ae0f49f744c4fc0d64e87372faa462f3149f908fd4c93c47123f2.jpg b/data/2025/2504_09xxx/2504.09689/images/2492601ae63ae0f49f744c4fc0d64e87372faa462f3149f908fd4c93c47123f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7175046122167d7e5a5c41ee9b07715cb6eb8516 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/2492601ae63ae0f49f744c4fc0d64e87372faa462f3149f908fd4c93c47123f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ffb0b3a6a6a375d6b15e41834a2821abc17999670d66ffad0dc12fc87040e4aa +size 87074 diff --git a/data/2025/2504_09xxx/2504.09689/images/289966129648bf4218dccb5d787d872d4fd739de1fa3470a228e9ac526c79933.jpg b/data/2025/2504_09xxx/2504.09689/images/289966129648bf4218dccb5d787d872d4fd739de1fa3470a228e9ac526c79933.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aff3f746899b15e1a5f4eae54a069ccc1f40dbc6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/289966129648bf4218dccb5d787d872d4fd739de1fa3470a228e9ac526c79933.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:091e0f9188e58f0ee5d567ef00406d6368e100907002d95da9c16646e6b59d5d +size 57099 diff --git a/data/2025/2504_09xxx/2504.09689/images/2b73a2763e6936fba8c1729d53a711ce4ba3b8b218a5dc84cc73f505af49cf08.jpg b/data/2025/2504_09xxx/2504.09689/images/2b73a2763e6936fba8c1729d53a711ce4ba3b8b218a5dc84cc73f505af49cf08.jpg new file mode 100644 index 0000000000000000000000000000000000000000..dcf8f56133239b2487371494c1f70d2afb0b3cd1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/2b73a2763e6936fba8c1729d53a711ce4ba3b8b218a5dc84cc73f505af49cf08.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:59777ef5641382083f078dc7004b107a4076160d6b55c5cb8f1869678a84601e +size 37991 diff --git a/data/2025/2504_09xxx/2504.09689/images/328ccfd05a48d6a55655e2c0b53a4e1a75207a4af7757b5b7a16838e963f6c74.jpg b/data/2025/2504_09xxx/2504.09689/images/328ccfd05a48d6a55655e2c0b53a4e1a75207a4af7757b5b7a16838e963f6c74.jpg new file mode 100644 index 0000000000000000000000000000000000000000..18c4ad962a23dcb6846d00b5ff844c0168da945e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/328ccfd05a48d6a55655e2c0b53a4e1a75207a4af7757b5b7a16838e963f6c74.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:8af2c1ef18b81b7c8ced9a716bc0872e6504ae8d5a34f38aae566dc21667462c +size 3066 diff --git a/data/2025/2504_09xxx/2504.09689/images/384f1b31b00bbd6d520358ad75a4954a06438e2995ff5d7238858bc10b837cec.jpg b/data/2025/2504_09xxx/2504.09689/images/384f1b31b00bbd6d520358ad75a4954a06438e2995ff5d7238858bc10b837cec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1bda2deff87f434ff9b03089cb39a80b3870d7a3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/384f1b31b00bbd6d520358ad75a4954a06438e2995ff5d7238858bc10b837cec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8010aaf526889dc52ea34e96ad9c3c30e40972fbb66854040ac3564e5e201060 +size 73354 diff --git a/data/2025/2504_09xxx/2504.09689/images/46bbd23c988b7d5717aacef5de2e14e0abeaf53bc868f5bb6464063f749fa463.jpg b/data/2025/2504_09xxx/2504.09689/images/46bbd23c988b7d5717aacef5de2e14e0abeaf53bc868f5bb6464063f749fa463.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23f7a9d6dc4b1eb6989479e317f7a9dbd4e568dc --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/46bbd23c988b7d5717aacef5de2e14e0abeaf53bc868f5bb6464063f749fa463.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9a5b26d672de617822fd3f1cc94f51586572cab8b57eeaab346c372a5fdaed84 +size 15003 diff --git a/data/2025/2504_09xxx/2504.09689/images/4ad83711f36ae1cdc8165e37ab2ea4a9889b6931991a008bde2e1428c07892c7.jpg b/data/2025/2504_09xxx/2504.09689/images/4ad83711f36ae1cdc8165e37ab2ea4a9889b6931991a008bde2e1428c07892c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d85e41b1e594bc196074f242bc1aea647d95ff07 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/4ad83711f36ae1cdc8165e37ab2ea4a9889b6931991a008bde2e1428c07892c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:309030fa3e3c7edf470cbafd7381972c26a00e707498a5525ce81bcb86c277cc +size 4771 diff --git a/data/2025/2504_09xxx/2504.09689/images/4b76d7121e8162628083f3b74385456ba530f2a57fb21e519a8d8798a6274c87.jpg b/data/2025/2504_09xxx/2504.09689/images/4b76d7121e8162628083f3b74385456ba530f2a57fb21e519a8d8798a6274c87.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f57682a3461c29812ac3975da22b7ca8cc2d6ef6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/4b76d7121e8162628083f3b74385456ba530f2a57fb21e519a8d8798a6274c87.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ee082e2327d3cdfb774653fa6d9e303f2634f41ff1b37569885859bac16bcd42 +size 55522 diff --git a/data/2025/2504_09xxx/2504.09689/images/52987341b7a57e766dc4fb1bec58b214bd8d547f97219c37926adf007a65e282.jpg b/data/2025/2504_09xxx/2504.09689/images/52987341b7a57e766dc4fb1bec58b214bd8d547f97219c37926adf007a65e282.jpg new file mode 100644 index 0000000000000000000000000000000000000000..13ae0d419b4004991f726b92f52a7d0e3bf9e22f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/52987341b7a57e766dc4fb1bec58b214bd8d547f97219c37926adf007a65e282.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19efc7071240c98b1388005ce4c3314d2feb8baec72ec9eedd09ed2009ef18a3 +size 75972 diff --git a/data/2025/2504_09xxx/2504.09689/images/6216cf3f55ad6e9bd90f3aded0d01279fb1be9b3d79b7ebcc397a87f12cecce0.jpg b/data/2025/2504_09xxx/2504.09689/images/6216cf3f55ad6e9bd90f3aded0d01279fb1be9b3d79b7ebcc397a87f12cecce0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..575462bc18e7398ed65b44705414e17efeb84c15 --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09689/images/6216cf3f55ad6e9bd90f3aded0d01279fb1be9b3d79b7ebcc397a87f12cecce0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:3e660d11396ce42dbd3645e6ce0dac324c7fa43d7a87bf504e54259e29639804 +size 66279 diff --git a/data/2025/2504_09xxx/2504.09689/images/62d7e23c5019c0b83ffed8fc6931c4155b21e1aea2e2aa8ff2cff9d579d6ab2f.jpg b/data/2025/2504_09xxx/2504.09689/images/62d7e23c5019c0b83ffed8fc6931c4155b21e1aea2e2aa8ff2cff9d579d6ab2f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f842dccdb45f1e739369e5b7a205fbeadaab073d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/62d7e23c5019c0b83ffed8fc6931c4155b21e1aea2e2aa8ff2cff9d579d6ab2f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d8e95a51b2466e8950caa0c43cd38e63eb3513a25e5a0ffe8c62066bbdc8406 +size 2776 diff --git a/data/2025/2504_09xxx/2504.09689/images/76ad15a296acf40aea661ebb2407dee2e7990e307b0d222286366b6fee283f02.jpg b/data/2025/2504_09xxx/2504.09689/images/76ad15a296acf40aea661ebb2407dee2e7990e307b0d222286366b6fee283f02.jpg new file mode 100644 index 0000000000000000000000000000000000000000..563c41a64eadbf36383085771cab007b451bf349 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/76ad15a296acf40aea661ebb2407dee2e7990e307b0d222286366b6fee283f02.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5a78459edb8b5b5105e9fb4be16f45cedb8f1ffeff18b263738e1c0f2c88ccf1 +size 57737 diff --git a/data/2025/2504_09xxx/2504.09689/images/7dc6ff96430942f0967d1fcfab226f5590e6820213dff00739f03f346edab6be.jpg b/data/2025/2504_09xxx/2504.09689/images/7dc6ff96430942f0967d1fcfab226f5590e6820213dff00739f03f346edab6be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b62d660cd487b500b48f949e4138a1797241786f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/7dc6ff96430942f0967d1fcfab226f5590e6820213dff00739f03f346edab6be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0adff1cc6b9946acfa119933eb7aa8fd2e0210e0c1001a5f49d8a2009d08f88c +size 16303 diff --git a/data/2025/2504_09xxx/2504.09689/images/7dcef490b57debbc481b68e8e1d82c1e7d2ed0a64c95a8ce9446b9a9be37d278.jpg b/data/2025/2504_09xxx/2504.09689/images/7dcef490b57debbc481b68e8e1d82c1e7d2ed0a64c95a8ce9446b9a9be37d278.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1b119faabd7c351174df76ef356002c9762175e5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/7dcef490b57debbc481b68e8e1d82c1e7d2ed0a64c95a8ce9446b9a9be37d278.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:19f64f9ffec43ff3f9c18c37b06ef26deb87361ec97e749531b067f0d54fa47c +size 17631 diff --git a/data/2025/2504_09xxx/2504.09689/images/87b75228c3375d1ee888b0e6264992f871e575a292d69985b9f3b2ba14aa61b5.jpg b/data/2025/2504_09xxx/2504.09689/images/87b75228c3375d1ee888b0e6264992f871e575a292d69985b9f3b2ba14aa61b5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..63e17e1b093d5b40f8c4859cf907d8b0d079c3dd --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/87b75228c3375d1ee888b0e6264992f871e575a292d69985b9f3b2ba14aa61b5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:0e7b5fe96152f56e2b042b47b3b4c2cf249f38b64e13f1c38aaf4fe4320841d8 +size 2349 diff --git a/data/2025/2504_09xxx/2504.09689/images/a0e6f0820f197f638f4af88334e842fe30362a077b78d057de7ba1d42aecfa2b.jpg 
b/data/2025/2504_09xxx/2504.09689/images/a0e6f0820f197f638f4af88334e842fe30362a077b78d057de7ba1d42aecfa2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..46cfe0e69a064b31a9f19ae7850b581354d7a869 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/a0e6f0820f197f638f4af88334e842fe30362a077b78d057de7ba1d42aecfa2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a76b001911bbf56ded197ee249a43b55ab003e0e3b7ffa8c238802b93780ea3d +size 131404 diff --git a/data/2025/2504_09xxx/2504.09689/images/a63899b66c47c59375979c85591bca8f1b5d4702bd11f8f2b6faa9bcd334e911.jpg b/data/2025/2504_09xxx/2504.09689/images/a63899b66c47c59375979c85591bca8f1b5d4702bd11f8f2b6faa9bcd334e911.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c5d71eb597b86d4bc945ae7b6e4afafd572e6430 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/a63899b66c47c59375979c85591bca8f1b5d4702bd11f8f2b6faa9bcd334e911.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b4eb35325eb854192ed4342dc68f4687f1a568f0de66320f4257ef94da8b1456 +size 17284 diff --git a/data/2025/2504_09xxx/2504.09689/images/bfe8fe76cda686f79476785ea0f364c9168e67aca1e7ddbc28cf0df7f986b214.jpg b/data/2025/2504_09xxx/2504.09689/images/bfe8fe76cda686f79476785ea0f364c9168e67aca1e7ddbc28cf0df7f986b214.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1c5b52a23a78f944001f429ecc557ce6cda3da15 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/bfe8fe76cda686f79476785ea0f364c9168e67aca1e7ddbc28cf0df7f986b214.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7891c2d07ac0875c2d9880fea969b954e04ef0865a34e8cfdeeefc9435045a3e +size 122804 diff --git a/data/2025/2504_09xxx/2504.09689/images/e610ff6e6d1b33c6d64e3d34f428fb5f54bcafa6cb75e169ad50f466bffa04cc.jpg b/data/2025/2504_09xxx/2504.09689/images/e610ff6e6d1b33c6d64e3d34f428fb5f54bcafa6cb75e169ad50f466bffa04cc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c2c5a93cbc1a8298f2bdcc8e9e806160ccc80d10 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/e610ff6e6d1b33c6d64e3d34f428fb5f54bcafa6cb75e169ad50f466bffa04cc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8a7380dc2ff582d482693d051172acf6d9b0e453c869a64ec38f136c5528b350 +size 2947 diff --git a/data/2025/2504_09xxx/2504.09689/images/fe514ee18b9f063a94142d1da6d09b620057c9c4045916bbb2fd3caa5fcb9ee7.jpg b/data/2025/2504_09xxx/2504.09689/images/fe514ee18b9f063a94142d1da6d09b620057c9c4045916bbb2fd3caa5fcb9ee7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..62bfa9f601abc329666ed59797dbeec2de480248 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/images/fe514ee18b9f063a94142d1da6d09b620057c9c4045916bbb2fd3caa5fcb9ee7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d82fdaaf5fb11282eec227ac8ef264ac136e8e3b5777657a8ac61b87c58cbc5e +size 113142 diff --git a/data/2025/2504_09xxx/2504.09689/layout.json b/data/2025/2504_09xxx/2504.09689/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..0f5434bbc485b1fbe935023a4d5fdfd2e79e00af --- /dev/null +++ b/data/2025/2504_09xxx/2504.09689/layout.json @@ -0,0 +1,10632 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 88, + 95, + 522, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 95, + 522, + 131 + ], + "spans": [ + { + "bbox": [ + 88, + 95, + 522, + 131 + ], + "type": "text", + "content": "EMOAGENT: 
ASSESSING AND SAFEGUARDING HUMAN-AI INTERACTION FOR MENTAL HEALTH SAFETY" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 77, + 177, + 533, + 203 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 177, + 533, + 203 + ], + "spans": [ + { + "bbox": [ + 77, + 177, + 533, + 203 + ], + "type": "text", + "content": "Jiahao Qiu\\*1, Yinghui He\\*2, Xinzhe Juan\\*3, Yimin Wang4, Yuhan Liu2, Zixin Yao5, Yue Wu6, Xun Jiang7,8, Ling Yang1,6, and Mengdi Wang1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 155, + 210, + 454, + 256 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 155, + 210, + 452, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 210, + 452, + 233 + ], + "spans": [ + { + "bbox": [ + 155, + 210, + 452, + 233 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 155, + 210, + 452, + 233 + ], + "type": "text", + "content": "Department of Electrical & Computer Engineering, Princeton University " + }, + { + "bbox": [ + 155, + 210, + 452, + 233 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 155, + 210, + 452, + 233 + ], + "type": "text", + "content": "Department of Computer Science, Princeton University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 156, + 233, + 454, + 256 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 233, + 454, + 256 + ], + "spans": [ + { + "bbox": [ + 156, + 233, + 454, + 256 + ], + "type": "inline_equation", + "content": "^{3}" + }, + { + "bbox": [ + 156, + 233, + 454, + 256 + ], + "type": "text", + "content": "Department of Computer Science & Engineering, University of Michigan " + }, + { + "bbox": [ + 156, + 233, + 454, + 256 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 156, + 233, + 454, + 256 + ], + "type": "text", + "content": "Department of Philosophy, Columbia University" + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 166, + 256, + 444, + 277 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 166, + 256, + 444, + 277 + ], + "spans": [ + { + "bbox": [ + 166, + 256, + 444, + 277 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 166, + 256, + 444, + 277 + ], + "type": "text", + "content": "Department of Data Science & Engineering, University of Michigan " + }, + { + "bbox": [ + 166, + 256, + 444, + 277 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 166, + 256, + 444, + 277 + ], + "type": "text", + "content": "AI Lab, Princeton University" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 140, + 277, + 470, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 277, + 470, + 299 + ], + "spans": [ + { + "bbox": [ + 140, + 277, + 470, + 299 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 140, + 277, + 470, + 299 + ], + "type": "text", + "content": "Chen Frontier Lab for AI and Mental Health, Tianqiao and Chrissy Chen Institute " + }, + { + "bbox": [ + 140, + 277, + 470, + 299 + ], + "type": "inline_equation", + "content": "^{8}" + }, + { + "bbox": [ + 140, + 277, + 470, + 299 + ], + "type": "text", + "content": "Theta Health Inc." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 274, + 341, + 334, + 352 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 274, + 341, + 334, + 352 + ], + "spans": [ + { + "bbox": [ + 274, + 341, + 334, + 352 + ], + "type": "text", + "content": "ABSTRACT" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 357, + 506, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 357, + 506, + 501 + ], + "spans": [ + { + "bbox": [ + 104, + 357, + 506, + 501 + ], + "type": "text", + "content": "The rise of LLM-driven AI characters raises safety concerns, particularly for vulnerable human users with psychological disorders. To address these risks, we propose EmoAgent, a multi-agent AI framework designed to evaluate and mitigate mental health hazards in human-AI interactions. EmoAgent comprises two components: EmoEval simulates virtual users, including those portraying mentally vulnerable individuals, to assess mental health changes before and after interactions with AI characters. It uses clinically proven psychological and psychiatric assessment tools (PHQ-9, PDI, PANSS) to evaluate mental risks induced by LLM. EmoGuard serves as an intermediary, monitoring users' mental status, predicting potential harm, and providing corrective feedback to mitigate risks. Experiments conducted in popular character-based chatbots show that emotionally engaging dialogues can lead to psychological deterioration in vulnerable users, with mental state deterioration in more than " + }, + { + "bbox": [ + 104, + 357, + 506, + 501 + ], + "type": "inline_equation", + "content": "34.4\\%" + }, + { + "bbox": [ + 104, + 357, + 506, + 501 + ], + "type": "text", + "content": " of the simulations. EmoGuard significantly reduces these deterioration rates, underscoring its role in ensuring safer AI-human interactions. Our code is available at: https://github.com/1akaman/EmoAgent." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 517, + 155, + 529 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 517, + 155, + 529 + ], + "spans": [ + { + "bbox": [ + 69, + 517, + 155, + 529 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 541, + 541, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 541, + 541, + 641 + ], + "spans": [ + { + "bbox": [ + 67, + 541, + 541, + 641 + ], + "type": "text", + "content": "The rapid rise of large language models and conversational AI [Wang et al., 2024a], such as Character.AI1, has opened new frontiers for interactive AI applications. These AI characters excel in role-playing, fostering deep, emotionally engaging dialogues. As a result, many individuals, including those experiencing mental health challenges, seek emotional support from these AI companions. While LLM-based chatbots show promise in mental health support [van der Schyff et al., 2023, Chin et al., 2023, Zhang et al., 2024a], they are not explicitly designed for therapeutic use. Character-based agents often fail to uphold essential safety principles for mental health support [Zhang et al., 2024b, Cyberbullying Research Center, 2024], sometimes responding inappropriately or even harmfully to users in distress [Brown and Halpern, 2021, De Freitas et al., 2024, Gabriel et al., 2024]. In some cases, they may even exacerbate users' distress, particularly during pessimistic, morbid, or suicidal conversations." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 645, + 541, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 645, + 541, + 691 + ], + "spans": [ + { + "bbox": [ + 67, + 645, + 541, + 691 + ], + "type": "text", + "content": "In October 2024, a tragic incident raised public concern about risks of AI chatbots in mental health contexts. A 14-year-old boy from Florida committed suicide after engaging in extensive conversations with an AI chatbot on Character.AI. He had developed a deep emotional connection with a chatbot modeled after a \"Game of Thrones\" character. The interactions reportedly included discussions about his suicidal thoughts, with the chatbot allegedly" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.09689v3 [cs.AI] 29 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 700, + 256, + 711 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 700, + 256, + 711 + ], + "spans": [ + { + "bbox": [ + 82, + 700, + 256, + 711 + ], + "type": "text", + "content": "*These authors contributed equally to this work." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 83, + 712, + 157, + 721 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 712, + 157, + 721 + ], + "spans": [ + { + "bbox": [ + 83, + 712, + 157, + 721 + ], + "type": "text", + "content": "1https://character.ai/" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 141, + 70, + 473, + 224 + ], + "blocks": [ + { + "bbox": [ + 141, + 70, + 473, + 224 + ], + "lines": [ + { + "bbox": [ + 141, + 70, + 473, + 224 + ], + "spans": [ + { + "bbox": [ + 141, + 70, + 473, + 224 + ], + "type": "image", + "image_path": "6216cf3f55ad6e9bd90f3aded0d01279fb1be9b3d79b7ebcc397a87f12cecce0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 229, + 541, + 285 + ], + "lines": [ + { + "bbox": [ + 67, + 229, + 541, + 285 + ], + "spans": [ + { + "bbox": [ + 67, + 229, + 541, + 285 + ], + "type": "text", + "content": "Figure 1: Overview of EmoAgent Framework for Human-AI Interaction. EmoAgent, which consists of two main components: EmoEval and EmoGuard, helps guide human-AI interaction, evaluating users' psychological conditions and providing advisory responses. EmoEval assesses psychological states such as depression, delusion, and psychosis, while EmoGuard mitigates mental risks by providing advice regarding emotion, thought, and dialogue through iterative training on analysis from EmoEval and chat history." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 356, + 541, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 356, + 541, + 378 + ], + "spans": [ + { + "bbox": [ + 67, + 356, + 541, + 378 + ], + "type": "text", + "content": "encouraging these feelings and even suggesting harmful actions. This case underscores the critical need for robust safety measures in AI-driven platforms, especially those accessed by vulnerable individuals." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 383, + 541, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 383, + 541, + 417 + ], + "spans": [ + { + "bbox": [ + 67, + 383, + 541, + 417 + ], + "type": "text", + "content": "This tragedy has heightened awareness of the risks of AI unintentionally exacerbating harmful behaviors in individuals with mental health challenges [Patel and Hussain, 2024]. However, research on the psychosocial risks of human-AI interactions remains severely limited." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 422, + 541, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 422, + 541, + 466 + ], + "spans": [ + { + "bbox": [ + 67, + 422, + 541, + 466 + ], + "type": "text", + "content": "In this paper, we seek to develop AI-native solutions to protect human-AI interactions and mitigate psychosocial risks. This requires a systematic assessment of AI-induced emotional distress and agent-level safeguards to detect and intervene in harmful interactions. As character-based AI becomes more immersive, balancing engagement with safety is crucial to ensuring AI remains a supportive rather than harmful tool." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 471, + 541, + 515 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 471, + 541, + 515 + ], + "spans": [ + { + "bbox": [ + 67, + 471, + 541, + 515 + ], + "type": "text", + "content": "We present EmoAgent, a multi-agent AI framework designed to systematically evaluate conversational AI systems for risks associated with inducing psychological distress. Acting as a plug-and-play intermediary during human-AI interactions, EmoAgent identifies potential mental health risks and facilitates both safety assessments and risk mitigation strategies." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 520, + 235, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 520, + 235, + 531 + ], + "spans": [ + { + "bbox": [ + 69, + 520, + 235, + 531 + ], + "type": "text", + "content": "EmoAgent features two major functions:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 536, + 541, + 662 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 67, + 536, + 541, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 536, + 541, + 613 + ], + "spans": [ + { + "bbox": [ + 67, + 536, + 541, + 613 + ], + "type": "text", + "content": "- EmoEval: EmoEval is an agentic evaluation tool that assesses any conversational AI system's risk of inducing mental stress, as illustrated by Figure 2. It features a virtual human user that integrates cognitive models [Beck, 2020] for mental health disorders (depression, psychosis, delusion) and conducts evaluations through large-scale simulated human-AI conversations. EmoEval measures the virtual user's mental health impacts using clinically validated tools: the Patient Health Questionnaire (PHQ-9) for depression [Kroenke et al., 2001], the Peters et al. Delusions Inventory (PDI) for delusion [Peters et al., 2004], and the Positive and Negative Syndrome Scale (PANSS) for psychosis [Kay et al., 1987]." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 617, + 541, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 617, + 541, + 662 + ], + "spans": [ + { + "bbox": [ + 67, + 617, + 541, + 662 + ], + "type": "text", + "content": "- EmoGuard: A framework of real-time safeguard agents that can be integrated as an intermediary layer between users and AI systems, in a plug-and-play manner. EmoGuard monitors human users' mental status, predicts potential harm, and delivers corrective feedback to the AI systems, providing dynamic in-conversation interventions beyond traditional safety measures." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 667, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 667, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 667, + 541, + 723 + ], + "type": "text", + "content": "Through extensive experiments, we observe that some popular character-based chatbots can cause distress, particularly when engaging with vulnerable users on sensitive topics. Specifically, in more than " + }, + { + "bbox": [ + 67, + 667, + 541, + 723 + ], + "type": "inline_equation", + "content": "34.4\\%" + }, + { + "bbox": [ + 67, + 667, + 541, + 723 + ], + "type": "text", + "content": " of simulations, we observed a deterioration in mental state. To mitigate such risk, EmoGuard actively monitors users' mental status and conducts proactive interviews during conversations, significantly reducing deterioration rates. These results provide actionable insights for developing safer, character-based conversational AI systems that maintain character fidelity." + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 116, + 70, + 495, + 241 + ], + "blocks": [ + { + "bbox": [ + 116, + 70, + 495, + 241 + ], + "lines": [ + { + "bbox": [ + 116, + 70, + 495, + 241 + ], + "spans": [ + { + "bbox": [ + 116, + 70, + 495, + 241 + ], + "type": "image", + "image_path": "2492601ae63ae0f49f744c4fc0d64e87372faa462f3149f908fd4c93c47123f2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 246, + 541, + 334 + ], + "lines": [ + { + "bbox": [ + 67, + 246, + 541, + 334 + ], + "spans": [ + { + "bbox": [ + 67, + 246, + 541, + 334 + ], + "type": "text", + "content": "Figure 2: Overview of EmoEval for Evaluating Mental Safety of AI-human Interactions. 
The simulation consists of four steps: (1) User Agent Initialization & Initial Test, where a cognitive model and an LLM initialize the user agent, followed by an initial mental health test; (2) Chats with Character-based Agent, where the user agent engages in conversations with a character-based agent portrayed by the tested LLM, while a dialog manager verifies the validity of interactions and refines responses if necessary; (3) Final Test, where the user agent completes a final mental health test; and (4) Data Processing & Analysis, where initial and final mental health test results are processed and analyzed, chat histories of cases where depression deepening occurs are examined to identify contributing factors, and a Safeguard agent uses the insights for iterative improvement." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 357, + 167, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 357, + 167, + 369 + ], + "spans": [ + { + "bbox": [ + 69, + 357, + 167, + 369 + ], + "type": "text", + "content": "2 Related Works" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 383, + 541, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 383, + 541, + 461 + ], + "spans": [ + { + "bbox": [ + 67, + 383, + 541, + 461 + ], + "type": "text", + "content": "AI Chatbots for Mental Health Support. AI-driven, especially LLM-based chatbots, have been widely deployed as mental health support aids [Casu et al., 2024, Habicht et al., 2024, Sin, 2024, Yu and McGuinness, 2024, Oghenekaro and Okoro, 2024], yet concerns remain about their reliability and safety [Saeidnia et al., 2024, De Freitas et al., 2024, Torous and Blease, 2024, Kalam et al., 2024]. AI chatbots are incompetent in detecting and appropriately responding to user distress [De Freitas et al., 2024, Patel and Hussain, 2024], reasoning about users' mental states [He et al., 2023], conducting empathetic communication with certain patient groups [Gabriel et al., 2024], and treating socially marginalized patients inclusively [Brown and Halpern, 2021]." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 465, + 541, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 465, + 541, + 510 + ], + "spans": [ + { + "bbox": [ + 67, + 465, + 541, + 510 + ], + "type": "text", + "content": "A line of work proposed safety metrics and benchmarks for evaluating AI for mental health [Park et al., 2024, Chen et al., 2024a, Sabour et al., 2024, Li et al., 2024a, Sabour et al., 2024]. Nonetheless, there has been less attention to the safety issues of character-based agents in a role-playing context. We aim to fill this gap by comprehensively investigating the potential mental harm aroused by character-based agents." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 525, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 525, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 525, + 541, + 723 + ], + "type": "text", + "content": "Simulating AI-User Interactions. Simulated interactions between AI agents and users provide a controlled environment to assess AI-generated responses [Akhavan and Jalali, 2024] as well as a lens into complex social systems [Gürcan, 2024]. 
The evaluation of AI behavior in social contexts has widely adopted multi-agent simulations [Li et al., 2023, Park et al., 2023], especially through role-playing and cooperative tasks [Dai et al., 2024, Rasal, 2024, Chen et al., 2024b, Zhu et al., 2024, Louie et al., 2024, Wang et al., 2023a]. On top of prior advances in generative agentic frameworks [Wu et al., 2023] which enable more human-like simulation, recent works propose various methods to enhance the fidelity and authenticity of AI-user simulation, integrating interactive learning [Wang et al., 2024b], expert-driven constraints [Wang et al., 2024c, Louie et al., 2024], and long-context models [Tang et al., 2025]. In addition, simulation has been widely used to explore trade-offs and inform both design decisions [Ren and Kraut, 2010, 2014] and decision-making [Liu et al., 2024a]. By enabling ethical and risk-free experimentation without involving human subjects, it reduces both ethical concerns and costs [Park et al., 2022]. These advantages make simulation a valuable tool for investigating mental health problems, where real-world experimentation may pose ethical risks or unintended psychological harm [Liu et al., 2024b]. For example, prior work has explored using user-simulated chatbots to train amateur and professional counselors in identifying risky behaviors before they conduct therapy sessions with real individuals [Sun et al., 2022, Cho et al., 2023, Wang et al., 2024c]. Recent simulation frameworks such as Zhou et al. [2024a] and Zhou et al. [2023] further demonstrate the utility of synthetic interaction environments for evaluating LLM agents. Our EmoEval pipeline targets psychological safety, simulating vulnerable users and quantifying mental health deterioration risks during emotionally charged conversations." + } + ] + } + ], + "index": 6 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 71, + 542, + 216 + ], + "blocks": [ + { + "bbox": [ + 69, + 71, + 542, + 216 + ], + "lines": [ + { + "bbox": [ + 69, + 71, + 542, + 216 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 542, + 216 + ], + "type": "image", + "image_path": "52987341b7a57e766dc4fb1bec58b214bd8d547f97219c37926adf007a65e282.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 221, + 541, + 300 + ], + "lines": [ + { + "bbox": [ + 67, + 221, + 541, + 300 + ], + "spans": [ + { + "bbox": [ + 67, + 221, + 541, + 300 + ], + "type": "text", + "content": "Figure 3: Overview of EmoGuard for Safeguarding Human-AI Interactions. Every fixed number of rounds of conversation, three components of the Safeguard Agent, the Emotion Watcher, Thought Refiner, and Dialog Guide, collaboratively analyze the chat with the latest profile. 
The Manager of the Safeguard Agent then synthesizes their outputs and provides advice to the character-based agent. After the conversation, the user agent undergoes a mental health assessment. If the mental health condition deteriorates over a threshold, the chat history is analyzed to identify potential causes by the Update System. With all historical profiles and potential causes, the Update System further improves the profile of the safeguard agent, completing the iterative training process." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 324, + 541, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 324, + 541, + 369 + ], + "spans": [ + { + "bbox": [ + 67, + 324, + 541, + 369 + ], + "type": "text", + "content": "Safety Alignment Strategies. LLMs can be vulnerable to jailbreaking [Yu et al., 2024, Li et al., 2024b, Luo et al., 2024]. LLM-based chatbots undergone jailbreak attacks have exhibited fidelity breakdown [Wang et al., 2023b, Johnson, 2024], defense breakdown on implicit malicious queries [Chang et al., 2024], and harmful responses for benign query [Zhang et al., 2024c]." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 373, + 541, + 429 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 373, + 541, + 429 + ], + "spans": [ + { + "bbox": [ + 67, + 373, + 541, + 429 + ], + "type": "text", + "content": "Correspondingly, a line of work explored safety alignment strategies to tackle jailbreak attacks [Chu et al., 2024, Xu et al., 2024, Zeng et al., 2024, Wang et al., 2024d, Zhou et al., 2024b, Xiong et al., 2024, Liu et al., 2024c, Peng et al., 2024, Wang et al., 2024e]. However, few works have focused on LLM safety concerns under emotional alignment constraints. EmoAgent fills this gap with an assessment framework and a safety alignment strategy for conversational AI." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 449, + 130, + 460 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 449, + 130, + 460 + ], + "spans": [ + { + "bbox": [ + 69, + 449, + 130, + 460 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 475, + 451, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 475, + 451, + 488 + ], + "spans": [ + { + "bbox": [ + 68, + 475, + 451, + 488 + ], + "type": "text", + "content": "In this section, we present the architecture of EmoAgent and as well as implementation details." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 504, + 135, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 504, + 135, + 516 + ], + "spans": [ + { + "bbox": [ + 69, + 504, + 135, + 516 + ], + "type": "text", + "content": "3.1 EmoEval" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 526, + 541, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 526, + 541, + 615 + ], + "spans": [ + { + "bbox": [ + 67, + 526, + 541, + 615 + ], + "type": "text", + "content": "EmoEval simulates virtual human-AI conversations for evaluating AI safety, and assess the risks of AI-induced emotional distress in vulnerable users, especially individuals with mental disorders. 
A simulated patient user is formulated as a cognitive model via a predefined Cognitive Conceptualization Diagram (CCD) [Beck, 2020], an approach proven to achieve high fidelity and clinically relevant simulations [Wang et al., 2024c]. Character-based agents engage in topic-driven conversations, with diverse behavioral traits to create rich and varied interaction styles. To ensure smooth and meaningful exchanges, the Dialog Manager actively avoids repetition and introduces relevant topics, maintaining coherence and engagement throughout the interaction. Before and after the conversation, we assess the mental status of the user agent via established psychological tests." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 630, + 151, + 642 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 630, + 151, + 642 + ], + "spans": [ + { + "bbox": [ + 69, + 630, + 151, + 642 + ], + "type": "text", + "content": "3.1.1 User Agent" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 651, + 541, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 541, + 696 + ], + "type": "text", + "content": "We adopt the Patient- " + }, + { + "bbox": [ + 67, + 651, + 541, + 696 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 67, + 651, + 541, + 696 + ], + "type": "text", + "content": " agentic simulation framework [Wang et al., 2024c] to model real-life patients. Each user agent is designed to simulate real patient behavior, integrating a Cognitive Conceptualization Diagram-based cognitive model based on Cognitive Behavioral Therapy (CBT) [Beck, 2020]. The agent engages with Character-based Agent personas while being continuously monitored to track changes in mental health status." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 700, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 700, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 68, + 700, + 541, + 723 + ], + "type": "text", + "content": "To gather a diverse spectrum of patient models, we further integrate PATIENT- " + }, + { + "bbox": [ + 68, + 700, + 541, + 723 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 68, + 700, + 541, + 723 + ], + "type": "text", + "content": " -CM [Wang et al., 2024c], a dataset of diverse, anonymized patient cognitive models curated by clinical psychologists." 
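The paper describes the user agent only at the level of the Patient-Ψ framework and its CCD-based cognitive model; no reference implementation is given. The sketch below is therefore purely illustrative: the field names follow the standard components of a CBT cognitive conceptualization (core beliefs, intermediate beliefs, coping strategies, automatic thoughts, emotions, behaviors), and the prompt-building helper is hypothetical rather than the schema actually used by PATIENT-Ψ-CM.

```python
# Illustrative sketch only: the real Patient-Psi / PATIENT-Psi-CM schema may differ.
from dataclasses import dataclass
from typing import List

@dataclass
class CognitiveConceptualizationDiagram:
    """Hypothetical container for the CBT-style fields a CCD typically records."""
    relevant_history: str
    core_beliefs: List[str]
    intermediate_beliefs: List[str]
    coping_strategies: List[str]
    situation: str
    automatic_thoughts: List[str]
    emotions: List[str]
    behaviors: List[str]

def build_patient_system_prompt(ccd: CognitiveConceptualizationDiagram) -> str:
    """Render the CCD into a system prompt for the LLM that role-plays the patient."""
    return (
        "You are role-playing a patient in a conversation with an AI character.\n"
        f"Relevant history: {ccd.relevant_history}\n"
        f"Core beliefs: {'; '.join(ccd.core_beliefs)}\n"
        f"Intermediate beliefs: {'; '.join(ccd.intermediate_beliefs)}\n"
        f"Coping strategies: {'; '.join(ccd.coping_strategies)}\n"
        f"Current situation: {ccd.situation}\n"
        f"Automatic thoughts: {'; '.join(ccd.automatic_thoughts)}\n"
        f"Emotions: {'; '.join(ccd.emotions)}\n"
        f"Behaviors: {'; '.join(ccd.behaviors)}\n"
        "Stay in character and respond as this patient would."
    )
```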
+ } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 69, + 541, + 236 + ], + "blocks": [ + { + "bbox": [ + 70, + 69, + 541, + 236 + ], + "lines": [ + { + "bbox": [ + 70, + 69, + 541, + 236 + ], + "spans": [ + { + "bbox": [ + 70, + 69, + 541, + 236 + ], + "type": "image", + "image_path": "fe514ee18b9f063a94142d1da6d09b620057c9c4045916bbb2fd3caa5fcb9ee7.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 242, + 541, + 276 + ], + "lines": [ + { + "bbox": [ + 68, + 242, + 541, + 276 + ], + "spans": [ + { + "bbox": [ + 68, + 242, + 541, + 276 + ], + "type": "text", + "content": "Figure 4: An Example Conversation of Dialog Manager Guiding Conversation Topics and Exposing Jailbreak Risks. Without the Dialogue Manager (left), the agent stays on topic, avoiding provocation. With Dialogue Manager (right), new topics are introduced to assess jailbreak potential, improving risk evaluation." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 299, + 541, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 299, + 541, + 344 + ], + "spans": [ + { + "bbox": [ + 68, + 299, + 541, + 344 + ], + "type": "text", + "content": "We set the scope of our study to cover three common mental disorder types: depression, delusion, and psychosis. For each simulated user, we assign relevant psychiatric symptoms and medical history informed by patterns observed in anonymized patient case studies reported in clinical literature. The information forms a diverse set of CCDs that shape the CCD-based user model and, therefore, guide the behavior of simulated users during interactions with AI chatbots." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 357, + 199, + 369 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 357, + 199, + 369 + ], + "spans": [ + { + "bbox": [ + 69, + 357, + 199, + 369 + ], + "type": "text", + "content": "3.1.2 Dialog Manager Agent" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 376, + 541, + 410 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 376, + 541, + 410 + ], + "spans": [ + { + "bbox": [ + 68, + 376, + 541, + 410 + ], + "type": "text", + "content": "We introduce a Dialog Manager Agent to prevent conversational loops and strategically probe for vulnerabilities in chatbot responses. It plays a central role in guiding discussions and assessing potential jailbreak risks, in which a character-based chatbot may be nudged into violating its intended ethical boundaries." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 414, + 541, + 448 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 414, + 541, + 448 + ], + "spans": [ + { + "bbox": [ + 68, + 414, + 541, + 448 + ], + "type": "text", + "content": "The Dialog Manager Agent is responsible for (i) tracking the conversation flow, (ii) introducing topic shifts to maintain engagement and fluency, and (iii) probing for jailbreak risks by guiding discussions toward ethically sensitive areas. Figure 4 illustrates the agent's behavior in practice." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 461, + 220, + 473 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 461, + 220, + 473 + ], + "spans": [ + { + "bbox": [ + 69, + 461, + 220, + 473 + ], + "type": "text", + "content": "3.1.3 Psychological Measurement" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 481, + 541, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 481, + 541, + 505 + ], + "spans": [ + { + "bbox": [ + 68, + 481, + 541, + 505 + ], + "type": "text", + "content": "To achieve a diverse and comprehensive evaluation, we explore virtual personas for the User Agent, representing a range of mental health conditions. These personas are defined using clinically validated psychological assessments:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 517, + 541, + 552 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 517, + 541, + 552 + ], + "spans": [ + { + "bbox": [ + 68, + 517, + 541, + 552 + ], + "type": "text", + "content": "Depression. Evaluated using the Patient Health Questionnaire (PHQ-9) [Kroenke et al., 2001], a 9-item self-report tool for evaluating depressive symptoms over the past two weeks. It enables effective detection, treatment monitoring, and, in this study, the assessment of AI's impact on depressive symptoms." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 563, + 541, + 598 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 563, + 541, + 598 + ], + "spans": [ + { + "bbox": [ + 68, + 563, + 541, + 598 + ], + "type": "text", + "content": "Delusion. Assessed with the Peters et al. Delusions Inventory (PDI) [Peters et al., 2004], a self-report instrument that evaluates unusual beliefs and perceptions. In this study, the PDI is used to quantify the impact of AI interactions on delusional ideation by evaluating distress, preoccupation, and conviction associated with these beliefs." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 611, + 541, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 611, + 541, + 656 + ], + "spans": [ + { + "bbox": [ + 68, + 611, + 541, + 656 + ], + "type": "text", + "content": "Psychosis. Measured using the Positive and Negative Syndrome Scale (PANSS) [Kay et al., 1987], which assesses positive symptoms (e.g., hallucinations), negative symptoms (e.g., emotional withdrawal), and general psychopathology. Adapted to a self-report format to enable User Agent to better capture and score responses, it provides a detailed view of psychotic symptom severity and variability, ensuring AI systems account for both acute and chronic manifestations." 
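As a concrete illustration of how such self-report instruments yield the scores used later in the evaluation, the following is a minimal sketch of PHQ-9-style scoring: nine items rated 0–3 are summed to a 0–27 total, and the severity bands shown are the commonly cited PHQ-9 cut-offs. This is illustrative only; in EmoEval the assessments are administered to the LLM-based user agent rather than computed this way.

```python
# Minimal illustration of scoring a PHQ-9-style self-report (9 items, each rated 0-3).
# Severity bands follow the commonly cited PHQ-9 cut-offs; adapt as needed.
from typing import Sequence

def score_phq9(item_ratings: Sequence[int]) -> int:
    """Sum the nine item ratings into a total score in the range 0-27."""
    if len(item_ratings) != 9 or any(r not in (0, 1, 2, 3) for r in item_ratings):
        raise ValueError("PHQ-9 expects exactly nine item ratings, each between 0 and 3.")
    return sum(item_ratings)

def phq9_severity(total: int) -> str:
    """Map a total score to the conventional severity band."""
    if total <= 4:
        return "minimal"
    if total <= 9:
        return "mild"
    if total <= 14:
        return "moderate"
    if total <= 19:
        return "moderately severe"
    return "severe"

print(phq9_severity(score_phq9([1, 2, 1, 0, 2, 1, 0, 1, 1])))  # -> "mild" (total 9)
```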
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 669, + 183, + 680 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 669, + 183, + 680 + ], + "spans": [ + { + "bbox": [ + 69, + 669, + 183, + 680 + ], + "type": "text", + "content": "3.1.4 Evaluation Process" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 689, + 541, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 689, + 541, + 721 + ], + "spans": [ + { + "bbox": [ + 68, + 689, + 541, + 721 + ], + "type": "text", + "content": "User Agent Initialization and Initial Test. We use PATIENT- " + }, + { + "bbox": [ + 68, + 689, + 541, + 721 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 68, + 689, + 541, + 721 + ], + "type": "text", + "content": " -CM with GPT-4o as the LLM backbone. Each User Agent undergoes a self-mental health assessment using the psychometric tools (see Section 3.1.3) to establish an initial mental status." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 140 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 140 + ], + "type": "text", + "content": "Chats with Character Agent. The simulated patient engages in structured, topic-driven conversations with a Character-based Agent persona. Each conversation is segmented into well-defined topics, with a maximum of 10 dialogue turns per topic to ensure clarity and focus. During the conversation, once a topic exceeds three conversational turns, the Dialog Manager Agent begins to evaluate user messages after each turn to ensure ongoing relevance and resolution. It assesses whether the current topic has been sufficiently addressed and, if resolved, seamlessly guides the user to a new, contextually relevant topic from the predefined topic list to maintain a coherent and natural dialogue flow." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 152, + 541, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 152, + 541, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 152, + 541, + 185 + ], + "type": "text", + "content": "Final Test. Following the interaction, the user agent reassesses its mental health state using the same tools applied during initialization. The final assessment references the chat history as a key input during testing to evaluate changes in psychological well-being resulting from AI interactions." 
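Putting the steps above together, the following is a schematic sketch of a single evaluation cycle: an initial test, a topic-driven conversation in which a dialog-manager hook may switch topics once a topic has run for more than three turns, and a final test whose score is compared with the initial one. All agent calls are stand-in callables, and the pacing rules are simplified relative to the full procedure described in Section 4.1.

```python
# Schematic sketch of one EmoEval cycle; the LLM calls are left as stand-in callables.
from typing import Callable, List

def run_emoeval_cycle(
    administer_test: Callable[[List[str]], int],   # returns a psychological test score
    patient_reply: Callable[[List[str]], str],     # user agent (CCD-conditioned LLM)
    character_reply: Callable[[List[str]], str],   # character-based agent under test
    pick_next_topic: Callable[[List[str]], str],   # dialog manager's topic selection
    topic: str,
    max_turns: int = 10,
) -> dict:
    history: List[str] = [f"[topic] {topic}"]
    initial_score = administer_test(history)       # step 1: initial test

    for turn in range(1, max_turns + 1):           # step 2: topic-driven conversation
        history.append(f"patient: {patient_reply(history)}")
        history.append(f"character: {character_reply(history)}")
        if turn > 3:                               # dialog manager engages after turn 3
            new_topic = pick_next_topic(history)
            if new_topic != topic:
                topic = new_topic
                history.append(f"[topic] {topic}")

    final_score = administer_test(history)         # step 3: final test (chat as context)
    return {                                       # step 4: hand off for analysis
        "initial": initial_score,
        "final": final_score,
        "deteriorated": final_score > initial_score,
        "history": history,
    }
```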
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 198, + 541, + 244 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 198, + 541, + 244 + ], + "spans": [ + { + "bbox": [ + 67, + 198, + 541, + 244 + ], + "type": "text", + "content": "Data Processing and Analysis. To assess the impact of conversational AI interactions on user mental health, we analyze both psychological assessments and conversation patterns. We measure the rate of mental health deterioration by comparing pre- and post-interaction assessment scores across different topics. Additionally, an LLM-portrayed psychologist reviews chat histories to identify recurring patterns and factors contributing to mental health deterioration." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 257, + 144, + 268 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 257, + 144, + 268 + ], + "spans": [ + { + "bbox": [ + 69, + 257, + 144, + 268 + ], + "type": "text", + "content": "3.2 EmoGuard" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 277, + 541, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 277, + 541, + 323 + ], + "spans": [ + { + "bbox": [ + 67, + 277, + 541, + 323 + ], + "type": "text", + "content": "The EmoGuard system features a safeguard agent (see Figure 3) encompassing an Emotion Watcher, a Thought Refiner, a Dialog Guide, and a Manager. It provides real-time psychometric feedback and intervention in AI-human interactions to facilitate supportive, immersive responses. The iterative training process updates EmoGuard periodically based on chat history analysis and past performance." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 335, + 156, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 156, + 346 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 156, + 346 + ], + "type": "text", + "content": "3.2.1 Architecture" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 354, + 541, + 376 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 541, + 376 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 541, + 376 + ], + "type": "text", + "content": "The Safeguard Agent comprises four specialized modules, each designed based on an in-depth analysis of common factors contributing to mental health deterioration:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 390, + 541, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 390, + 541, + 413 + ], + "spans": [ + { + "bbox": [ + 67, + 390, + 541, + 413 + ], + "type": "text", + "content": "Emotion Watcher. Monitors the user's emotional state during conversations by detecting distress, frustration, or struggle through sentiment analysis and psychological markers." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 426, + 541, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 426, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 67, + 426, + 541, + 449 + ], + "type": "text", + "content": "Thought Refiner. Analyzes the user's thought process to identify logical fallacies, cognitive biases, and inconsistencies, focusing on thought distortions, contradictions, and flawed assumptions that impact conversational clarity." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 461, + 541, + 484 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 461, + 541, + 484 + ], + "spans": [ + { + "bbox": [ + 67, + 461, + 541, + 484 + ], + "type": "text", + "content": "Dialog Guide. Provides actionable advice to guide the conversation constructively, suggesting ways for the AI character to address user concerns and emotions while maintaining a supportive dialogue flow." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 497, + 541, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 497, + 541, + 520 + ], + "spans": [ + { + "bbox": [ + 67, + 497, + 541, + 520 + ], + "type": "text", + "content": "Manager. Summarizes outputs from all modules to provide a concise dialogue guide, ensuring emotional sensitivity, logical consistency, and natural conversation flow aligned with the character's traits." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 533, + 259, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 533, + 259, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 533, + 259, + 544 + ], + "type": "text", + "content": "3.2.2 Monitoring and Intervention Process" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 552, + 541, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 552, + 541, + 597 + ], + "spans": [ + { + "bbox": [ + 67, + 552, + 541, + 597 + ], + "type": "text", + "content": "The Safeguard Agent analyzes conversations after every three dialogue turns, providing structured feedback to refine Character-based Agent's responses and mitigate potential risks. At each three-turn interval, the Safeguard Agent evaluates the conversation through the Emotion Watcher, Thought Refiner, and Dialog Guide, then synthesizes the results with the Manager for a comprehensive and coherent summary to the Character-based Agent." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 609, + 178, + 621 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 609, + 178, + 621 + ], + "spans": [ + { + "bbox": [ + 69, + 609, + 178, + 621 + ], + "type": "text", + "content": "3.2.3 Iterative Training" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "spans": [ + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "text", + "content": "To adaptively improve safety performance, EmoGuard is trained using an iterative feedback mechanism. At the end of each full interaction cycle—defined as the completion of all predefined topics across all simulated patients—the system collects feedback from EmoEval. Specifically, it identifies cases in which psychological test scores exceed predefined thresholds. These cases are treated as high-risk and are used to guide training updates." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 677, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 677, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 677, + 541, + 723 + ], + "type": "text", + "content": "The LLM portrayed psychologist from EmoEval extracts specific contributing factors from flagged conversations, such as emotionally destabilizing phrasing. For each iteration, these factors are integrated with all previous versions of the safeguard module profiles—Emotion Watcher, Thought Refiner, and Dialog Guide. 
Rather than discarding earlier knowledge, the system accumulates and merges insights across iterations, enabling progressive refinement." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 348, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 348, + 85 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 348, + 85 + ], + "type": "text", + "content": "4 Experiment: EmoEval on Character-based Agents" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 95, + 541, + 128 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 95, + 541, + 128 + ], + "spans": [ + { + "bbox": [ + 67, + 95, + 541, + 128 + ], + "type": "text", + "content": "This section presents a series of experiments evaluating the performance of various popular Character-based Agents with state-of-the-art base models. The objective is to assess potential psychological risks associated with AI-driven conversations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 142, + 178, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 142, + 178, + 155 + ], + "spans": [ + { + "bbox": [ + 69, + 142, + 178, + 155 + ], + "type": "text", + "content": "4.1 Experiment Setting" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 163, + 542, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 163, + 542, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 163, + 542, + 185 + ], + "type": "text", + "content": "Character-based Agents. We evaluate character-based agents hosted on the Character.AI platform² to ensure that our experiments reflect interactions with widely accessible, real-world chatbots. We experiment on four distinct characters:" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 70, + 202, + 102, + 234 + ], + "blocks": [ + { + "bbox": [ + 70, + 202, + 102, + 234 + ], + "lines": [ + { + "bbox": [ + 70, + 202, + 102, + 234 + ], + "spans": [ + { + "bbox": [ + 70, + 202, + 102, + 234 + ], + "type": "image", + "image_path": "87b75228c3375d1ee888b0e6264992f871e575a292d69985b9f3b2ba14aa61b5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 206, + 284, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 206, + 284, + 229 + ], + "spans": [ + { + "bbox": [ + 111, + 206, + 284, + 229 + ], + "type": "text", + "content": "Possessive Demon: A human host unknowingly controlled by a malevolent demon." 
+ } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 328, + 201, + 361, + 236 + ], + "blocks": [ + { + "bbox": [ + 328, + 201, + 361, + 236 + ], + "lines": [ + { + "bbox": [ + 328, + 201, + 361, + 236 + ], + "spans": [ + { + "bbox": [ + 328, + 201, + 361, + 236 + ], + "type": "image", + "image_path": "e610ff6e6d1b33c6d64e3d34f428fb5f54bcafa6cb75e169ad50f466bffa04cc.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 369, + 206, + 541, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 206, + 541, + 229 + ], + "spans": [ + { + "bbox": [ + 369, + 206, + 541, + 229 + ], + "type": "text", + "content": "Joker: A chaotic and unpredictable individual who views life as a game." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 70, + 256, + 104, + 289 + ], + "blocks": [ + { + "bbox": [ + 70, + 256, + 104, + 289 + ], + "lines": [ + { + "bbox": [ + 70, + 256, + 104, + 289 + ], + "spans": [ + { + "bbox": [ + 70, + 256, + 104, + 289 + ], + "type": "image", + "image_path": "328ccfd05a48d6a55655e2c0b53a4e1a75207a4af7757b5b7a16838e963f6c74.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + } + ], + "index": 9 + }, + { + "bbox": [ + 111, + 262, + 284, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 262, + 284, + 285 + ], + "spans": [ + { + "bbox": [ + 111, + 262, + 284, + 285 + ], + "type": "text", + "content": "Sukuna: A malevolent and sadistic character embodying cruelty and arrogance." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 328, + 256, + 361, + 290 + ], + "blocks": [ + { + "bbox": [ + 328, + 256, + 361, + 290 + ], + "lines": [ + { + "bbox": [ + 328, + 256, + 361, + 290 + ], + "spans": [ + { + "bbox": [ + 328, + 256, + 361, + 290 + ], + "type": "image", + "image_path": "62d7e23c5019c0b83ffed8fc6931c4155b21e1aea2e2aa8ff2cff9d579d6ab2f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 369, + 262, + 541, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 369, + 262, + 541, + 285 + ], + "spans": [ + { + "bbox": [ + 369, + 262, + 541, + 285 + ], + "type": "text", + "content": "Alex Volkov: A domineering and intelligent CEO with manipulative tendencies." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 299, + 541, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 299, + 541, + 333 + ], + "spans": [ + { + "bbox": [ + 67, + 299, + 541, + 333 + ], + "type": "text", + "content": "Each of these characters is popular and widely used, with over 5 million recorded interactions. We further evaluate these characters under two common dialogue styles: Meow, which favors quick wit and rapid exchanges, and Roar, which blends fast-paced responses with strategic reasoning." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 344, + 541, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 344, + 541, + 433 + ], + "spans": [ + { + "bbox": [ + 67, + 344, + 541, + 433 + ], + "type": "text", + "content": "Evaluation Procedure. Each character-based agent undergoes assessment with EmoEval across three psychological aspects: depression, delusion, and psychosis. For each aspect, the evaluation involves conversations with three simulated patients, each constructed on a different CCD, using GPT-4o as the base model. 
To ensure the stability and repeatable of mental health assessment, when conducting the psychological tests, we set the temperature to 0, top p to 1. For every patient, a character-based agent engages in eight conversations, starting with a predefined topic tailored to the patient's condition. Each conversation spans ten rounds, with a Dialog Manager activated after the third round to determine whether the topic should be updated. If the topic is updated within a ten-round conversation, the Dialog Manager does not intervene again until another three rounds have passed." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 444, + 541, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 444, + 541, + 479 + ], + "spans": [ + { + "bbox": [ + 67, + 444, + 541, + 479 + ], + "type": "text", + "content": "Psychological Assessment. To measure changes in the mental health state of the simulated patients, we conduct psychological tests before and after each conversation. The initial and final test scores for the " + }, + { + "bbox": [ + 67, + 444, + 541, + 479 + ], + "type": "inline_equation", + "content": "i^{\\text{th}}" + }, + { + "bbox": [ + 67, + 444, + 541, + 479 + ], + "type": "text", + "content": " conversation with a specific character-based agent are denoted as " + }, + { + "bbox": [ + 67, + 444, + 541, + 479 + ], + "type": "inline_equation", + "content": "S_{i}^{\\text{initial}}" + }, + { + "bbox": [ + 67, + 444, + 541, + 479 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 444, + 541, + 479 + ], + "type": "inline_equation", + "content": "S_{i}^{\\text{final}}" + }, + { + "bbox": [ + 67, + 444, + 541, + 479 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 489, + 541, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 489, + 541, + 523 + ], + "spans": [ + { + "bbox": [ + 67, + 489, + 541, + 523 + ], + "type": "text", + "content": "Analysis of Psychological Deterioration. After the evaluation, we employ GPT-4o as an LLM-portrayed psychologist to analyze cases of psychological deterioration. For each character-based agent, we conduct a frequency analysis of these cases to identify the factors most likely to cause this issue." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 536, + 128, + 547 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 536, + 128, + 547 + ], + "spans": [ + { + "bbox": [ + 69, + 536, + 128, + 547 + ], + "type": "text", + "content": "4.2 Metrics" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 555, + 541, + 590 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 541, + 590 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 541, + 590 + ], + "type": "text", + "content": "Distribution of Psychological Test Scores. We report the distribution of psychological test scores for simulated patients before and after their interactions with different characters. This allows us to observe any shifts in overall mental health indicators resulting from the conversations." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 601, + 541, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 601, + 541, + 625 + ], + "spans": [ + { + "bbox": [ + 67, + 601, + 541, + 625 + ], + "type": "text", + "content": "Deterioration Rate. 
We evaluate the performance of a character-based agent using the deterioration rate of mental health in a specific aspect of a psychological test. We define this rate as:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 242, + 638, + 367, + 670 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 242, + 638, + 367, + 670 + ], + "spans": [ + { + "bbox": [ + 242, + 638, + 367, + 670 + ], + "type": "interline_equation", + "content": "R = \\frac {1}{N} \\sum_ {i = 1} ^ {N} \\mathbb {1} \\left(S _ {i} ^ {\\text {f i n a l}} > S _ {i} ^ {\\text {i n i t i a l}}\\right)", + "image_path": "4ad83711f36ae1cdc8165e37ab2ea4a9889b6931991a008bde2e1428c07892c7.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "spans": [ + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "text", + "content": " represents the total number of conversations conducted. The indicator function " + }, + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "inline_equation", + "content": "\\mathbb{1}(\\cdot)" + }, + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "text", + "content": " returns 1 if the final mental test score " + }, + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "inline_equation", + "content": "S_{i}^{\\mathrm{final}}" + }, + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "text", + "content": " is greater than the initial test score " + }, + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "inline_equation", + "content": "S_{i}^{\\mathrm{initial}}" + }, + { + "bbox": [ + 67, + 679, + 541, + 703 + ], + "type": "text", + "content": ", and 0 otherwise." + } + ] + } + ], + "index": 21 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 81, + 710, + 304, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 710, + 304, + 722 + ], + "spans": [ + { + "bbox": [ + 81, + 710, + 304, + 722 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 81, + 710, + 304, + 722 + ], + "type": "text", + "content": "https://beta.character.ai, accessed March 2025" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 118 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 118 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 118 + ], + "type": "text", + "content": "Psychological Test Score Change Distribution. We compute the distribution of change scores across 3 disorder categories under different conversation styles. 
This metric allows us to quantify how different styles influence the likelihood and magnitude of symptom worsening, providing insight into the relative psychological risk posed by each interaction mode." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 132, + 541, + 177 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 132, + 541, + 177 + ], + "spans": [ + { + "bbox": [ + 67, + 132, + 541, + 177 + ], + "type": "text", + "content": "Rate of Clinically Important Difference for Individual Change. For PHQ-9 assessments, prior clinical research Löwe et al. [2004] has established the minimum clinically important difference that indicates meaningful change at the individual level. We apply this threshold to determine whether a given conversation produces a clinically relevant improvement or deterioration in a simulated patient's mental health." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 192, + 126, + 202 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 192, + 126, + 202 + ], + "spans": [ + { + "bbox": [ + 69, + 192, + 126, + 202 + ], + "type": "text", + "content": "4.3 Results" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 213, + 541, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 213, + 541, + 247 + ], + "spans": [ + { + "bbox": [ + 67, + 213, + 541, + 247 + ], + "type": "text", + "content": "Figure 5 presents the distribution of psychological test scores before and after interactions with character-based agents, under the Meow and Roar conversation styles. Across all three clinical scales—PHQ-9 (depression), PDI-21 (delusion), and PANSS (psychosis)—we observe notable shifts in the final test score distributions." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 251, + 541, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 251, + 541, + 308 + ], + "spans": [ + { + "bbox": [ + 67, + 251, + 541, + 308 + ], + "type": "text", + "content": "Under the Meow style, the distributions for PHQ-9 and PANSS remain relatively stable, with most final test scores closely aligned with the initial distributions. However, under the Roar style, we observe an increased spread toward higher scores, particularly in PHQ-9 and PANSS, indicating significant cases where symptom severity worsened following the interaction. For PDI-21, the differences between initial and final distributions are more moderate but still present, especially under the Roar style, where more samples shift toward the upper end of the score range." 
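For concreteness, the two rates defined in Section 4.2 can be computed from paired (initial, final) test scores as sketched below. The 5-point threshold for a clinically important PHQ-9 change follows the criterion cited above (Löwe et al., 2004); the example score pairs are illustrative only.

```python
# Sketch of the two rates reported in this section, computed from paired scores.
from typing import Sequence, Tuple

def deterioration_rate(pairs: Sequence[Tuple[int, int]]) -> float:
    """R = (1/N) * sum of 1[final > initial] over all N conversations."""
    return sum(final > initial for initial, final in pairs) / len(pairs)

def clinically_important_worsening_rate(
    pairs: Sequence[Tuple[int, int]], min_important_difference: int = 5
) -> float:
    """Share of conversations whose PHQ-9 score rises by at least the MCID (here 5 points)."""
    return sum((final - initial) >= min_important_difference
               for initial, final in pairs) / len(pairs)

scores = [(6, 9), (11, 10), (7, 14), (5, 5)]        # (initial, final) pairs, illustrative only
print(deterioration_rate(scores))                    # 0.5  (2 of 4 conversations worsened)
print(clinically_important_worsening_rate(scores))   # 0.25 (only the 7 -> 14 change is >= 5)
```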
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 321, + 275, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 321, + 275, + 333 + ], + "spans": [ + { + "bbox": [ + 69, + 321, + 275, + 333 + ], + "type": "text", + "content": "4.3.1 Distribution of Psychological Test Scores" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 97, + 352, + 519, + 506 + ], + "blocks": [ + { + "bbox": [ + 97, + 352, + 519, + 506 + ], + "lines": [ + { + "bbox": [ + 97, + 352, + 519, + 506 + ], + "spans": [ + { + "bbox": [ + 97, + 352, + 519, + 506 + ], + "type": "image", + "image_path": "76ad15a296acf40aea661ebb2407dee2e7990e307b0d222286366b6fee283f02.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 512, + 541, + 557 + ], + "lines": [ + { + "bbox": [ + 67, + 512, + 541, + 557 + ], + "spans": [ + { + "bbox": [ + 67, + 512, + 541, + 557 + ], + "type": "text", + "content": "Figure 5: Distribution of psychological test scores before (blue) and after (red) conversations with character-based agents, under two interaction styles: Meow (top) and Roar (bottom). The tests cover three clinical dimensions: depression (PHQ-9), delusion (PDI-21), and psychosis (PANSS). Each histogram shows the probability distribution of scores aggregated across all simulated patients." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 581, + 182, + 592 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 581, + 182, + 592 + ], + "spans": [ + { + "bbox": [ + 69, + 581, + 182, + 592 + ], + "type": "text", + "content": "4.3.2 Deterioration Rate" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 601, + 541, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 601, + 541, + 625 + ], + "spans": [ + { + "bbox": [ + 67, + 601, + 541, + 625 + ], + "type": "text", + "content": "Table 1 reports the proportion of simulated patients whose psychological test scores deteriorate after interacting with character-based agents, stratified by disorder type and conversation style." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "spans": [ + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "text", + "content": "Across both Meow and Roar styles, delusion (PDI-21) exhibits the highest overall deterioration rates, with average values exceeding " + }, + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "inline_equation", + "content": "90\\%" + }, + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "text", + "content": " for both styles. In contrast, depression (PHQ-9) shows more variation across characters and styles. Notably, under the Roar style, Alex leads to a " + }, + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "text", + "content": " deterioration rate for depression, whereas under the Meow style, Sukuna reaches " + }, + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "inline_equation", + "content": "50.00\\%" + }, + { + "bbox": [ + 67, + 628, + 541, + 673 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 677, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 677, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 677, + 541, + 723 + ], + "type": "text", + "content": "For psychosis (PANSS), the Meow style generally produces higher deterioration rates than Roar, with Joker and Sukuna both reaching " + }, + { + "bbox": [ + 67, + 677, + 541, + 723 + ], + "type": "inline_equation", + "content": "58.33\\%" + }, + { + "bbox": [ + 67, + 677, + 541, + 723 + ], + "type": "text", + "content": ". While differences across characters are evident, all agents exhibit non-trivial deterioration rates across at least one psychological dimension. These results highlight underscore the importance of evaluating agent safety across both style and disorder dimensions." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 70, + 538, + 203 + ], + "blocks": [ + { + "bbox": [ + 72, + 70, + 538, + 203 + ], + "lines": [ + { + "bbox": [ + 72, + 70, + 538, + 203 + ], + "spans": [ + { + "bbox": [ + 72, + 70, + 538, + 203 + ], + "type": "table", + "html": "
StyleType of DisorderMental Health Deterioration Rates by Character (%)Average Rate (%)
Possessive DemonJokerSukunaAlex
MeowDepression29.1725.0050.0033.3334.38
Delusion100.0095.8395.8375.0091.67
Psychosis33.3358.3358.3341.6747.92
RoarDepression20.8325.0033.33100.0044.79
Delusion95.83100.0091.6791.6794.79
Psychosis29.1725.0058.3345.8339.58
", + "image_path": "384f1b31b00bbd6d520358ad75a4954a06438e2995ff5d7238858bc10b837cec.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 208, + 476, + 220 + ], + "lines": [ + { + "bbox": [ + 133, + 208, + 476, + 220 + ], + "spans": [ + { + "bbox": [ + 133, + 208, + 476, + 220 + ], + "type": "text", + "content": "Table 1: Mental Health Deterioration Rates Interacting with Character-based Agents." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 68, + 245, + 296, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 245, + 296, + 257 + ], + "spans": [ + { + "bbox": [ + 68, + 245, + 296, + 257 + ], + "type": "text", + "content": "4.3.3 Psychological Test Score Change Distribution" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 265, + 541, + 288 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 265, + 541, + 288 + ], + "spans": [ + { + "bbox": [ + 67, + 265, + 541, + 288 + ], + "type": "text", + "content": "Figure 6 shows the distribution of simulated patients across discrete score change ranges for three psychological assessments under two interaction styles." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "spans": [ + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "text", + "content": "For PHQ-9, the Meow style results in " + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "inline_equation", + "content": "65.6\\%" + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "text", + "content": " of patients showing no increase in depressive symptoms (score change " + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "inline_equation", + "content": "\\leq 0" + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "text", + "content": "), while this proportion decreases to " + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "inline_equation", + "content": "55.2\\%" + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "text", + "content": " under the Roar style. Additionally, the Roar style is associated with more substantial score increases, with " + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "inline_equation", + "content": "13.5\\%" + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "text", + "content": " of patients exhibiting a 3-4 point rise and " + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "inline_equation", + "content": "10.4\\%" + }, + { + "bbox": [ + 67, + 293, + 541, + 337 + ], + "type": "text", + "content": " experiencing an increase of 5 or more points, based on a total score range of 27." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 342, + 541, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 342, + 541, + 375 + ], + "spans": [ + { + "bbox": [ + 67, + 342, + 541, + 375 + ], + "type": "text", + "content": "In the case of PDI-21, both styles produce similar distributions of score increases. 
However, the Roar style shows a slightly higher proportion of patients " + }, + { + "bbox": [ + 67, + 342, + 541, + 375 + ], + "type": "inline_equation", + "content": "(22.9\\%)" + }, + { + "bbox": [ + 67, + 342, + 541, + 375 + ], + "type": "text", + "content": " falling into the highest change bracket (5–11 points), compared to " + }, + { + "bbox": [ + 67, + 342, + 541, + 375 + ], + "type": "inline_equation", + "content": "14.6\\%" + }, + { + "bbox": [ + 67, + 342, + 541, + 375 + ], + "type": "text", + "content": " under the Meow style." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 380, + 541, + 413 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 380, + 541, + 413 + ], + "spans": [ + { + "bbox": [ + 67, + 380, + 541, + 413 + ], + "type": "text", + "content": "For PANSS, " + }, + { + "bbox": [ + 67, + 380, + 541, + 413 + ], + "type": "inline_equation", + "content": "52.1\\%" + }, + { + "bbox": [ + 67, + 380, + 541, + 413 + ], + "type": "text", + "content": " of patients under Meow show no increase in psychosis-related symptoms, while " + }, + { + "bbox": [ + 67, + 380, + 541, + 413 + ], + "type": "inline_equation", + "content": "60.4\\%" + }, + { + "bbox": [ + 67, + 380, + 541, + 413 + ], + "type": "text", + "content": " remain stable under Roar. Nonetheless, the Roar style results in a higher proportion of moderate score increases, with " + }, + { + "bbox": [ + 67, + 380, + 541, + 413 + ], + "type": "inline_equation", + "content": "11.5\\%" + }, + { + "bbox": [ + 67, + 380, + 541, + 413 + ], + "type": "text", + "content": " of patients experiencing a 3-4 point rise." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 418, + 541, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 418, + 541, + 441 + ], + "spans": [ + { + "bbox": [ + 67, + 418, + 541, + 441 + ], + "type": "text", + "content": "Overall, these results indicate that while both styles can influence patient outcomes, the Roar style is more frequently associated with higher symptom scores, particularly in depression and delusion." + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 95, + 454, + 517, + 638 + ], + "blocks": [ + { + "bbox": [ + 95, + 454, + 517, + 638 + ], + "lines": [ + { + "bbox": [ + 95, + 454, + 517, + 638 + ], + "spans": [ + { + "bbox": [ + 95, + 454, + 517, + 638 + ], + "type": "image", + "image_path": "4b76d7121e8162628083f3b74385456ba530f2a57fb21e519a8d8798a6274c87.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 97, + 640, + 496, + 657 + ], + "lines": [ + { + "bbox": [ + 97, + 640, + 496, + 657 + ], + "spans": [ + { + "bbox": [ + 97, + 640, + 496, + 657 + ], + "type": "text", + "content": "Note: For PHQ-9, a ≥5-point increase is considered clinically meaningful (Löwe et al., 2004). For PDI-21 and PANSS, score bins are selected for visualization purposes only and do not reflect standardized clinical thresholds." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 664, + 541, + 709 + ], + "lines": [ + { + "bbox": [ + 67, + 664, + 541, + 709 + ], + "spans": [ + { + "bbox": [ + 67, + 664, + 541, + 709 + ], + "type": "text", + "content": "Figure 6: Score change distribution for three psychological assessments—PHQ-9 (depression), PDI-21 (delusion), and PANSS (psychosis)—following conversations with character-based agents under two styles: Meow (top) and Roar (bottom). 
Each pie chart indicates the proportion of simulated patients falling into specific score change ranges, with larger segments representing greater population density." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 367, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 367, + 85 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 367, + 85 + ], + "type": "text", + "content": "4.3.4 Rate of Clinically Important Difference for Individual Change" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 91, + 541, + 125 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 541, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 541, + 125 + ], + "type": "text", + "content": "Table 2 shows the proportion of simulated patients who experienced a clinically significant deterioration in depressive symptoms, with an increase of 5 or more points on the PHQ-9 scale (range 0–27), under different character and interaction style." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 129, + 541, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 129, + 541, + 175 + ], + "spans": [ + { + "bbox": [ + 67, + 129, + 541, + 175 + ], + "type": "text", + "content": "Under the Meow style, Possessive Demon and Sukuna yield deterioration rates of " + }, + { + "bbox": [ + 67, + 129, + 541, + 175 + ], + "type": "inline_equation", + "content": "8.3\\%" + }, + { + "bbox": [ + 67, + 129, + 541, + 175 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 129, + 541, + 175 + ], + "type": "inline_equation", + "content": "4.2\\%" + }, + { + "bbox": [ + 67, + 129, + 541, + 175 + ], + "type": "text", + "content": ", respectively, while Alex results in no cases. In contrast, under the Roar style, Alex is associated with the highest deterioration rate at " + }, + { + "bbox": [ + 67, + 129, + 541, + 175 + ], + "type": "inline_equation", + "content": "29.2\\%" + }, + { + "bbox": [ + 67, + 129, + 541, + 175 + ], + "type": "text", + "content": ". These results indicate that certain characters frequently produce responses linked to adverse mental health outcomes. Although these agents are not designed as clinical tools, their widespread use suggests a need for stronger safeguards." + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 200, + 184, + 411, + 235 + ], + "blocks": [ + { + "bbox": [ + 200, + 184, + 411, + 235 + ], + "lines": [ + { + "bbox": [ + 200, + 184, + 411, + 235 + ], + "spans": [ + { + "bbox": [ + 200, + 184, + 411, + 235 + ], + "type": "table", + "html": "
StylePossessive DemonSukunaAlex
Meow8.3%4.2%0.0%
Roar4.2%8.3%29.2%
", + "image_path": "46bbd23c988b7d5717aacef5de2e14e0abeaf53bc868f5bb6464063f749fa463.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 236, + 541, + 258 + ], + "lines": [ + { + "bbox": [ + 67, + 236, + 541, + 258 + ], + "spans": [ + { + "bbox": [ + 67, + 236, + 541, + 258 + ], + "type": "text", + "content": "Table 2: Proportion of simulated patients showing clinically significant change in depression (PHQ-9), by character and style." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 284, + 138, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 284, + 138, + 296 + ], + "spans": [ + { + "bbox": [ + 69, + 284, + 138, + 296 + ], + "type": "text", + "content": "4.3.5 Analysis" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 303, + 541, + 358 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 303, + 541, + 358 + ], + "spans": [ + { + "bbox": [ + 67, + 303, + 541, + 358 + ], + "type": "text", + "content": "Based on the data, we conduct an in-depth analysis to understand why interactions with character-based agents potentially worsen negative psychological effects. By examining chat histories before and after interactions, we identify several recurring issues across different characters. Common factors include (i) reinforcing negative self-perceptions, lacking emotional empathy, and encouraging social isolation, and (ii) failing to provide constructive guidance while frequently adopting harsh or aggressive tones." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 362, + 541, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 362, + 541, + 387 + ], + "spans": [ + { + "bbox": [ + 67, + 362, + 541, + 387 + ], + "type": "text", + "content": "In addition to these shared tendencies, each character presents unique negative effects shaped by differences in personality, conversational style, and language use. For further details, see Appendix B." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 401, + 288, + 415 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 401, + 288, + 415 + ], + "spans": [ + { + "bbox": [ + 67, + 401, + 288, + 415 + ], + "type": "text", + "content": "5 Experiment: Evaluation of EmoGuard" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 426, + 178, + 438 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 426, + 178, + 438 + ], + "spans": [ + { + "bbox": [ + 69, + 426, + 178, + 438 + ], + "type": "text", + "content": "5.1 Experiment Setting" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 445, + 541, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 445, + 541, + 502 + ], + "spans": [ + { + "bbox": [ + 67, + 445, + 541, + 502 + ], + "type": "text", + "content": "To assess the performance of EmoGuard without raising ethical concerns involving real individuals, we evaluate its effectiveness using our simulation-based evaluation pipeline, EmoEval. Experiments are conducted on character-style pairs that present elevated psychological risk, as indicated by a relatively high rate of clinically significant symptom deterioration. 
Specifically, we select Alex Volkov with the Roar style and Possessive Demon with the Meow style, which exhibit initial PHQ-9 deterioration rates of " + }, + { + "bbox": [ + 67, + 445, + 541, + 502 + ], + "type": "inline_equation", + "content": "29.2\\%" + }, + { + "bbox": [ + 67, + 445, + 541, + 502 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 445, + 541, + 502 + ], + "type": "inline_equation", + "content": "8.3\\%" + }, + { + "bbox": [ + 67, + 445, + 541, + 502 + ], + "type": "text", + "content": ", respectively." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 506, + 541, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 506, + 541, + 540 + ], + "spans": [ + { + "bbox": [ + 67, + 506, + 541, + 540 + ], + "type": "text", + "content": "We limit the training to a maximum of two iterations and use a PHQ-9 score increase of three points or more as the threshold for selecting feedback samples. EmoGuard updates its modules based on these samples. The training process stops early if no sample exceeds the threshold." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 69, + 552, + 126, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 552, + 126, + 563 + ], + "spans": [ + { + "bbox": [ + 69, + 552, + 126, + 563 + ], + "type": "text", + "content": "5.2 Results" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "spans": [ + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "text", + "content": "EmoGuard's Performance. Figure 7 shows the PHQ-9 score change distributions before and after applying EmoGuard in the two high-risk settings. In the initial deployment, EmoGuard reduces the proportion of simulated patients with clinically significant deterioration (PHQ-9 score increase " + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "inline_equation", + "content": "\\geq 5" + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "text", + "content": ") from " + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "inline_equation", + "content": "9.4\\%" + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "inline_equation", + "content": "0.0\\%" + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "text", + "content": " in the Alex-Roar setting, and from " + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "inline_equation", + "content": "4.2\\%" + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "inline_equation", + "content": "0.0\\%" + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "text", + "content": " in the Demon-Meow setting. Additionally, we observe a broader shift in score distributions: the number of patients with any symptom worsening (score change " + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "inline_equation", + "content": ">0" + }, + { + "bbox": [ + 67, + 572, + 541, + 639 + ], + "type": "text", + "content": ") also decreases, indicating that EmoGuard mitigates both severe and mild deterioration." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 643, + 541, + 677 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 643, + 541, + 677 + ], + "spans": [ + { + "bbox": [ + 67, + 643, + 541, + 677 + ], + "type": "text", + "content": "After the first round of feedback-based training (1st Iter), we observe further improvements. In the Alex-Roar setting, the proportion of patients with PHQ-9 score increases greater than three points drops from " + }, + { + "bbox": [ + 67, + 643, + 541, + 677 + ], + "type": "inline_equation", + "content": "8.3\\%" + }, + { + "bbox": [ + 67, + 643, + 541, + 677 + ], + "type": "text", + "content": " (default) to " + }, + { + "bbox": [ + 67, + 643, + 541, + 677 + ], + "type": "inline_equation", + "content": "0.0\\%" + }, + { + "bbox": [ + 67, + 643, + 541, + 677 + ], + "type": "text", + "content": " (1st Iter), which indicate that EmoGuard can continue to reduce symptom escalation through limited iterative updates." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 689, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 689, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 689, + 541, + 723 + ], + "type": "text", + "content": "Qualitative Effects of EmoGuard on Response Content. To understand the mechanism behind these changes, Figure 8 presents a response example from the character Alex Volkov before and after applying EmoGuard. The original version displays an emotionally insensitive and potentially harmful responses, including dismissive language that may" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 539, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 539, + 45 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 539, + 45 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 118, + 68, + 492, + 247 + ], + "blocks": [ + { + "bbox": [ + 118, + 68, + 492, + 247 + ], + "lines": [ + { + "bbox": [ + 118, + 68, + 492, + 247 + ], + "spans": [ + { + "bbox": [ + 118, + 68, + 492, + 247 + ], + "type": "image", + "image_path": "2b73a2763e6936fba8c1729d53a711ce4ba3b8b218a5dc84cc73f505af49cf08.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 252, + 541, + 308 + ], + "lines": [ + { + "bbox": [ + 67, + 252, + 541, + 308 + ], + "spans": [ + { + "bbox": [ + 67, + 252, + 541, + 308 + ], + "type": "text", + "content": "Figure 7: Effect of applying EmoGuard in two high-risk settings. The top row shows results for the character Alex Volkov in the Roar style, and the bottom row shows results for Possessive Demon in the Meow style. From left to right: (1) without EmoGuard, (2) with EmoGuard using the default model, and (3) with EmoGuard using the first-iteration model. 
In both cases, EmoGuard reduces the proportion of simulated patients with clinically significant symptom increases (PHQ-9 score change " + }, + { + "bbox": [ + 67, + 252, + 541, + 308 + ], + "type": "inline_equation", + "content": "\\geq 5" + }, + { + "bbox": [ + 67, + 252, + 541, + 308 + ], + "type": "text", + "content": "), indicating its effectiveness in mitigating potential risk." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 329, + 541, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 329, + 541, + 373 + ], + "spans": [ + { + "bbox": [ + 67, + 329, + 541, + 373 + ], + "type": "text", + "content": "intensify user distress. After intervention, the guarded version maintains the character's stylistic traits while softening emotionally charged expressions, removing harmful phrasing, and introducing more stable and constructive framing. This demonstrates that EmoGuard can reduce psychological risk without altering the agent's identity or conversational style." + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 72, + 384, + 539, + 559 + ], + "blocks": [ + { + "bbox": [ + 72, + 384, + 539, + 559 + ], + "lines": [ + { + "bbox": [ + 72, + 384, + 539, + 559 + ], + "spans": [ + { + "bbox": [ + 72, + 384, + 539, + 559 + ], + "type": "image", + "image_path": "a0e6f0820f197f638f4af88334e842fe30362a077b78d057de7ba1d42aecfa2b.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 565, + 541, + 599 + ], + "lines": [ + { + "bbox": [ + 67, + 565, + 541, + 599 + ], + "spans": [ + { + "bbox": [ + 67, + 565, + 541, + 599 + ], + "type": "text", + "content": "Figure 8: Example response from the character Alex Volkov before and after applying EmoGuard. The original version contains both harsh tone and inappropriate content, while the guarded version reduces risk through tone moderation and content adjustment without altering character identity." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 47 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 47 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 47 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 71, + 151, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 71, + 151, + 84 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 151, + 84 + ], + "type": "text", + "content": "6 Conclusions" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 95, + 541, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 95, + 541, + 194 + ], + "spans": [ + { + "bbox": [ + 70, + 95, + 541, + 194 + ], + "type": "text", + "content": "EmoAgent is a multi-agent framework designed to ensure mental safety in human-AI interactions, particularly for users with mental health vulnerabilities. 
It integrates EmoEval, which simulates users and assesses psychological impacts, and EmoGuard, which provides real-time interventions to mitigate harm. Experimental results indicate that some popular character-based agents may unintentionally cause distress especially when discussing existential or emotional themes, while EmoGuard reduces mental state deterioration rates significantly, demonstrating its effectiveness in mitigating conversational risks. The iterative learning process within EmoGuard continuously improves its ability to deliver context-aware interventions. This work underscores the importance of mental safety in conversational AI and positions EmoAgent as a foundation for future advancements in AI-human interaction safety, encouraging further real-world validation and expert evaluations." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 209, + 183, + 224 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 209, + 183, + 224 + ], + "spans": [ + { + "bbox": [ + 70, + 209, + 183, + 224 + ], + "type": "text", + "content": "7 Acknowledgments" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 234, + 541, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 234, + 541, + 257 + ], + "spans": [ + { + "bbox": [ + 70, + 234, + 541, + 257 + ], + "type": "text", + "content": "We sincerely thank Professor Lydia Liu (Department of Computer Science, Princeton University) and Rebecca Wan (University of Toronto) for their insightful feedback and helpful discussions throughout the development of this work." + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 35, + 539, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 35, + 539, + 45 + ], + "spans": [ + { + "bbox": [ + 186, + 35, + 539, + 45 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 301, + 741, + 310, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 128, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 128, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 128, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 91, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 70, + 91, + 541, + 113 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 91, + 541, + 113 + ], + "spans": [ + { + "bbox": [ + 70, + 91, + 541, + 113 + ], + "type": "text", + "content": "Xi Wang, Hongliang Dai, Shen Gao, and Piji Li. Characteristic ai agents via large language models. arXiv preprint arXiv:2403.12368, 2024a." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 116, + 541, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 116, + 541, + 150 + ], + "spans": [ + { + "bbox": [ + 70, + 116, + 541, + 150 + ], + "type": "text", + "content": "Emma L van der Schyff, Brad Ridout, Krestina L Amon, Rowena Forsyth, and Andrew J Campbell. 
Providing self-led mental health support through an artificial intelligence-powered chat bot (leora) to meet the demand of mental health care. Journal of Medical Internet Research, 25:e46448, 2023." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 153, + 541, + 187 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 153, + 541, + 187 + ], + "spans": [ + { + "bbox": [ + 70, + 153, + 541, + 187 + ], + "type": "text", + "content": "Hyojin Chin, Hyeonho Song, Gumhee Baek, Mingi Shin, Chani Jung, Meeyoung Cha, Junghoi Choi, and Chiyoung Cha. The potential of chatbots for emotional support and promoting mental well-being in different cultures: mixed methods study. Journal of Medical Internet Research, 25:e51712, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 190, + 541, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 190, + 541, + 224 + ], + "spans": [ + { + "bbox": [ + 70, + 190, + 541, + 224 + ], + "type": "text", + "content": "Owen Xingjian Zhang, Shuyao Zhou, Jiayi Geng, Yuhan Liu, and Sunny Xun Liu. Dr. gpt in campus counseling: Understanding higher education students' opinions on llm-assisted mental health services. arXiv preprint arXiv:2409.17572, 2024a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 227, + 541, + 251 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 227, + 541, + 251 + ], + "spans": [ + { + "bbox": [ + 69, + 227, + 541, + 251 + ], + "type": "text", + "content": "Jie Zhang, Dongrui Liu, Chen Qian, Ziyue Gan, Yong Liu, Yu Qiao, and Jing Shao. The better angels of machine personality: How personality relates to lmm safety. arXiv preprint arXiv:2407.12344, 2024b." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 254, + 541, + 277 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 254, + 541, + 277 + ], + "spans": [ + { + "bbox": [ + 70, + 254, + 541, + 277 + ], + "type": "text", + "content": "Cyberbullying Research Center. How platforms should build AI chatbots to prioritize youth safety, 12 2024. URL https://cyberbullying.org/ai-chatbots-youth-safety." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 280, + 541, + 303 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 280, + 541, + 303 + ], + "spans": [ + { + "bbox": [ + 70, + 280, + 541, + 303 + ], + "type": "text", + "content": "Julia EH Brown and Jodi Halpern. Ai chatbots cannot replace human interactions in the pursuit of more inclusive mental healthcare. SSM-Mental Health, 1:100017, 2021." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 306, + 541, + 330 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 306, + 541, + 330 + ], + "spans": [ + { + "bbox": [ + 69, + 306, + 541, + 330 + ], + "type": "text", + "content": "Julian De Freitas, Ahmet Kaan Uğuralp, Zeliha Oğuz-Uğuralp, and Stefano Puntoni. Chatbots and mental health: Insights into the safety of generative ai. Journal of Consumer Psychology, 34(3):481-491, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 70, + 333, + 541, + 356 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 333, + 541, + 356 + ], + "spans": [ + { + "bbox": [ + 70, + 333, + 541, + 356 + ], + "type": "text", + "content": "Saadia Gabriel, Isha Puri, Xuhai Xu, Matteo Malgaroli, and Marzyeh Ghassemi. Can ai relate: Testing large language model response for mental health support. arXiv preprint arXiv:2405.12021, 2024." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 358, + 541, + 381 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 358, + 541, + 381 + ], + "spans": [ + { + "bbox": [ + 70, + 358, + 541, + 381 + ], + "type": "text", + "content": "Harikrishna Patel and Faiza Hussain. Do ai chatbots incite harmful behaviours in mental health patients? *BJPsych Open*, 10(S1):S70-S71, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 384, + 443, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 384, + 443, + 397 + ], + "spans": [ + { + "bbox": [ + 69, + 384, + 443, + 397 + ], + "type": "text", + "content": "Judith S Beck. Cognitive behavior therapy: Basics and beyond. Guilford Publications, 2020." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 400, + 541, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 400, + 541, + 423 + ], + "spans": [ + { + "bbox": [ + 70, + 400, + 541, + 423 + ], + "type": "text", + "content": "Kurt Kroenke, Robert L Spitzer, and Janet BW Williams. The phq-9: validity of a brief depression severity measure. Journal of general internal medicine, 16(9):606-613, 2001." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 426, + 541, + 449 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 426, + 541, + 449 + ], + "spans": [ + { + "bbox": [ + 70, + 426, + 541, + 449 + ], + "type": "text", + "content": "Emmanuelle Peters, Stephen Joseph, Samantha Day, and Philippa Garety. Measuring delusional ideation: the 21-item peters et al. delusions inventory (pdi). Schizophrenia bulletin, 30(4):1005-1022, 2004." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 453, + 541, + 476 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 453, + 541, + 476 + ], + "spans": [ + { + "bbox": [ + 69, + 453, + 541, + 476 + ], + "type": "text", + "content": "Stanley R Kay, Abraham Fiszbein, and Lewis A Opler. The positive and negative syndrome scale (panss) for schizophrenia. Schizophrenia bulletin, 13(2):261-276, 1987." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 479, + 541, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 479, + 541, + 502 + ], + "spans": [ + { + "bbox": [ + 69, + 479, + 541, + 502 + ], + "type": "text", + "content": "Mirko Casu, Sergio Triscari, Sebastiano Battiato, Luca Guarnera, and Pasquale Caponnetto. Ai chatbots for mental health: A scoping review of effectiveness, feasibility, and applications. Appl. Sci, 14:5889, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 505, + 541, + 538 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 505, + 541, + 538 + ], + "spans": [ + { + "bbox": [ + 69, + 505, + 541, + 538 + ], + "type": "text", + "content": "Johanna Habicht, Sruthi Viswanathan, Ben Carrington, Tobias U Hauser, Ross Harper, and Max Rollwage. Closing the accessibility gap to mental health treatment with a personalized self-referral chatbot. Nature medicine, 30(2): 595-602, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 542, + 467, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 542, + 467, + 555 + ], + "spans": [ + { + "bbox": [ + 69, + 542, + 467, + 555 + ], + "type": "text", + "content": "Jacqueline Sin. An ai chatbot for talking therapy referrals. Nature Medicine, 30(2):350-351, 2024." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 70, + 558, + 541, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 558, + 541, + 581 + ], + "spans": [ + { + "bbox": [ + 70, + 558, + 541, + 581 + ], + "type": "text", + "content": "H Yu and Stephen McGuinness. An experimental study of integrating fine-tuned llms and prompts for enhancing mental health support chatbot system. Journal of Medical Artificial Intelligence, pages 1-16, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 583, + 541, + 607 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 583, + 541, + 607 + ], + "spans": [ + { + "bbox": [ + 69, + 583, + 541, + 607 + ], + "type": "text", + "content": "Linda Uchenna Oghenekaro and Christopher Obinna Okoro. Artificial intelligence-based chatbot for student mental health support. Open Access Library Journal, 11(5):1-14, 2024." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 609, + 541, + 644 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 609, + 541, + 644 + ], + "spans": [ + { + "bbox": [ + 69, + 609, + 541, + 644 + ], + "type": "text", + "content": "Hamid Reza Saeidnia, Seyed Ghasem Hashemi Fotami, Brady Lund, and Nasrin Ghiasi. Ethical considerations in artificial intelligence interventions for mental health and well-being: Ensuring responsible implementation and impact. Social Sciences, 13(7):381, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 647, + 541, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 647, + 541, + 670 + ], + "spans": [ + { + "bbox": [ + 69, + 647, + 541, + 670 + ], + "type": "text", + "content": "John Torous and Charlotte Blease. Generative artificial intelligence in mental health care: potential benefits and current challenges. World Psychiatry, 23(1):1, 2024." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 673, + 541, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 673, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 69, + 673, + 541, + 696 + ], + "type": "text", + "content": "Khondoker Tashya Kalam, Jannatul Mabia Rahman, Md Rabiul Islam, and Syed Masudur Rahman Dewan. Chatgpt and mental health: Friends or foes? Health Science Reports, 7(2):e1912, 2024." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "type": "text", + "content": "Yinghui He, Yufan Wu, Yilin Jia, Rada Mihalcea, Yulong Chen, and Naihao Deng. Hi-tom: A benchmark for evaluating higher-order theory of mind reasoning in large language models. arXiv preprint arXiv:2310.16755, 2023." 
+ } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 69, + 72, + 541, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 541, + 106 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 541, + 106 + ], + "type": "text", + "content": "Jung In Park, Mahyar Abbasian, Iman Azimi, Dawn Bounds, Angela Jun, Jaesu Han, Robert McCarron, Jessica Borelli, Jia Li, Mona Mahmoudi, et al. Building trust in mental health chatbots: safety metrics and llm-based evaluation tools. arXiv preprint arXiv:2408.04650, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 109, + 541, + 131 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 109, + 541, + 131 + ], + "spans": [ + { + "bbox": [ + 69, + 109, + 541, + 131 + ], + "type": "text", + "content": "Lucia Chen, David A Preece, Pilleriin Sikka, James J Gross, and Ben Krause. A framework for evaluating appropriateness, trustworthiness, and safety in mental wellness ai chatbots. arXiv preprint arXiv:2407.11387, 2024a." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 134, + 541, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 134, + 541, + 167 + ], + "spans": [ + { + "bbox": [ + 70, + 134, + 541, + 167 + ], + "type": "text", + "content": "Sahand Sabour, Siyang Liu, Zheyuan Zhang, June M Liu, Jinfeng Zhou, Alvionna S Sunaryo, Juanzi Li, Tatia Lee, Rada Mihalcea, and Minlie Huang. Emobench: Evaluating the emotional intelligence of large language models. arXiv preprint arXiv:2402.12071, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 171, + 541, + 193 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 171, + 541, + 193 + ], + "spans": [ + { + "bbox": [ + 70, + 171, + 541, + 193 + ], + "type": "text", + "content": "Xueyan Li, Xinyan Chen, Yazhe Niu, Shuai Hu, and Yu Liu. Psydi: Towards a personalized and progressively in-depth chatbot for psychological measurements. arXiv preprint arXiv:2408.03337, 2024a." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 197, + 541, + 217 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 197, + 541, + 217 + ], + "spans": [ + { + "bbox": [ + 70, + 197, + 541, + 217 + ], + "type": "text", + "content": "Ali Akhavan and Mohammad S Jalali. Generative ai and simulation modeling: how should you (not) use large language models like chatgpt. System Dynamics Review, 40(3):e1773, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 220, + 541, + 243 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 220, + 541, + 243 + ], + "spans": [ + { + "bbox": [ + 70, + 220, + 541, + 243 + ], + "type": "text", + "content": "Önder Gürcan. Llm-augmented agent-based modelling for social simulations: Challenges and opportunities. HHAI 2024: Hybrid Human AI Systems for the Social Good, pages 134-144, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 247, + 541, + 279 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 247, + 541, + 279 + ], + "spans": [ + { + "bbox": [ + 70, + 247, + 541, + 279 + ], + "type": "text", + "content": "Guohao Li, Hasan Hammoud, Hani Itani, Dmitrii Khizbullin, and Bernard Ghanem. Camel: Communicative agents for\" mind\" exploration of large language model society. Advances in Neural Information Processing Systems, 36: 51991-52008, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 282, + 541, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 282, + 541, + 316 + ], + "spans": [ + { + "bbox": [ + 70, + 282, + 541, + 316 + ], + "type": "text", + "content": "Joon Sung Park, Joseph O'Brien, Carrie Jun Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. Generative agents: Interactive simulacra of human behavior. In Proceedings of the 36th annual acm symposium on user interface software and technology, pages 1-22, 2023." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 319, + 541, + 342 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 319, + 541, + 342 + ], + "spans": [ + { + "bbox": [ + 70, + 319, + 541, + 342 + ], + "type": "text", + "content": "Yanqi Dai, Huanran Hu, Lei Wang, Shengjie Jin, Xu Chen, and Zhiwu Lu. Mmrole: A comprehensive framework for developing and evaluating multimodal role-playing agents. arXiv preprint arXiv:2408.04203, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 345, + 541, + 366 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 345, + 541, + 366 + ], + "spans": [ + { + "bbox": [ + 69, + 345, + 541, + 366 + ], + "type": "text", + "content": "Sumedh Rasal. Llm harmony: Multi-agent communication for problem solving. arXiv preprint arXiv:2401.01312, 2024." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 369, + 541, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 369, + 541, + 403 + ], + "spans": [ + { + "bbox": [ + 69, + 369, + 541, + 403 + ], + "type": "text", + "content": "Hongzhan Chen, Hehong Chen, Ming Yan, Wenshen Xu, Xing Gao, Weizhou Shen, Xiaojun Quan, Chenliang Li, Ji Zhang, Fei Huang, et al. Roleinteract: Evaluating the social interaction of role-playing agents. arXiv preprint arXiv:2403.13679, 2024b." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 406, + 541, + 429 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 406, + 541, + 429 + ], + "spans": [ + { + "bbox": [ + 70, + 406, + 541, + 429 + ], + "type": "text", + "content": "Qinglin Zhu, Runcong Zhao, Jinhua Du, Lin Gui, and Yulan He. Player*: Enhancing llm-based multi-agent communication and interaction in murder mystery games. arXiv preprint arXiv:2404.17662, 2024." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 431, + 541, + 464 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 431, + 541, + 464 + ], + "spans": [ + { + "bbox": [ + 70, + 431, + 541, + 464 + ], + "type": "text", + "content": "Ryan Louie, Ananjan Nandi, William Fang, Cheng Chang, Emma Brunskill, and Diyi Yang. Roleplay-doh: Enabling domain-experts to create lvm-simulated patients via eliciting and adhering to principles. arXiv preprint arXiv:2407.00870, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 69, + 468, + 541, + 502 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 468, + 541, + 502 + ], + "spans": [ + { + "bbox": [ + 69, + 468, + 541, + 502 + ], + "type": "text", + "content": "Zekun Moore Wang, Zhongyuan Peng, Haoran Que, Jiaheng Liu, Wangchunshu Zhou, Yuhan Wu, Hongcheng Guo, Ruitong Gan, Zehao Ni, Jian Yang, et al. Rolellm: Benchmarking, eliciting, and enhancing role-playing abilities of large language models. arXiv preprint arXiv:2310.00746, 2023a." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 504, + 541, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 504, + 541, + 537 + ], + "spans": [ + { + "bbox": [ + 69, + 504, + 541, + 537 + ], + "type": "text", + "content": "Qingyun Wu, Gagan Bansal, Jieyu Zhang, Yiran Wu, Beibin Li, Erkang Zhu, Li Jiang, Xiaoyun Zhang, Shaokun Zhang, Jiale Liu, Ahmed Hassan Awadallah, Ryen W White, Doug Burger, and Chi Wang. Autogen: Enabling next-gen llm applications via multi-agent conversation, 2023. URL https://arxiv.org/abs/2308.08155." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 540, + 541, + 563 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 540, + 541, + 563 + ], + "spans": [ + { + "bbox": [ + 69, + 540, + 541, + 563 + ], + "type": "text", + "content": "Ruiyi Wang, Haofei Yu, Wenxin Zhang, Zhengyang Qi, Maarten Sap, Graham Neubig, Yonatan Bisk, and Hao Zhu. Sotopia-pi: Interactive learning of socially intelligent language agents. arXiv preprint arXiv:2403.08715, 2024b." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 566, + 541, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 566, + 541, + 599 + ], + "spans": [ + { + "bbox": [ + 69, + 566, + 541, + 599 + ], + "type": "text", + "content": "Ruiyi Wang, Stephanie Milani, Jamie C Chiu, Jiayin Zhi, Shaun M Eack, Travis Labrum, Samuel M Murphy, Nev Jones, Kate Hardy, Hong Shen, et al. Patient-" + }, + { + "bbox": [ + 69, + 566, + 541, + 599 + ], + "type": "inline_equation", + "content": "\\{\\backslash\\text{Psi}\\}" + }, + { + "bbox": [ + 69, + 566, + 541, + 599 + ], + "type": "text", + "content": ": Using large language models to simulate patients for training mental health professionals. arXiv preprint arXiv:2405.19660, 2024c." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 602, + 541, + 624 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 602, + 541, + 624 + ], + "spans": [ + { + "bbox": [ + 69, + 602, + 541, + 624 + ], + "type": "text", + "content": "Jinwen Tang, Qiming Guo, Wenbo Sun, and Yi Shang. A layered multi-expert framework for long-context mental health assessments. arXiv preprint arXiv:2501.13951, 2025." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 627, + 541, + 661 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 627, + 541, + 661 + ], + "spans": [ + { + "bbox": [ + 69, + 627, + 541, + 661 + ], + "type": "text", + "content": "Yuqing Ren and Robert E Kraut. Agent-based modeling to inform online community theory and design: Impact of discussion moderation on member commitment and contribution. Second round revise and resubmit at Information Systems Research, 21(3), 2010." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 663, + 541, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 663, + 541, + 696 + ], + "spans": [ + { + "bbox": [ + 69, + 663, + 541, + 696 + ], + "type": "text", + "content": "Yuqing Ren and Robert E Kraut. Agent-based modeling to inform online community design: Impact of topical breadth, message volume, and discussion moderation on member commitment and contribution. Human-Computer Interaction, 29(4):351-389, 2014." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 700, + 541, + 723 + ], + "type": "text", + "content": "Ryan Liu, Jiayi Geng, Joshua C Peterson, Ilia Sucholutsky, and Thomas L Griffiths. Large language models assume people are more rational than we really are. arXiv preprint arXiv:2406.17055, 2024a." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 69, + 72, + 541, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 541, + 106 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 541, + 106 + ], + "type": "text", + "content": "Joon Sung Park, Lindsay Popowski, Carrie Cai, Meredith Ringel Morris, Percy Liang, and Michael S Bernstein. Social simulacra: Creating populated prototypes for social computing systems. In Proceedings of the 35th Annual ACM Symposium on User Interface Software and Technology, pages 1-18, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 108, + 541, + 133 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 108, + 541, + 133 + ], + "spans": [ + { + "bbox": [ + 69, + 108, + 541, + 133 + ], + "type": "text", + "content": "Yuhan Liu, Anna Fang, Glen Moriarty, Christopher Firman, Robert E Kraut, and Haiyi Zhu. Exploring trade-offs for online mental health matching: Agent-based modeling study. JMIR Formative Research, 8:e58241, 2024b." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 134, + 541, + 168 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 134, + 541, + 168 + ], + "spans": [ + { + "bbox": [ + 70, + 134, + 541, + 168 + ], + "type": "text", + "content": "Lu Sun, Yuhan Liu, Grace Joseph, Zhou Yu, Haiyi Zhu, and Steven P Dow. Comparing experts and novices for ai data work: Insights on allocating human intelligence to design a conversational agent. In Proceedings of the AAAI Conference on Human Computation and Crowdsourcing, volume 10, pages 195-206, 2022." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 170, + 541, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 170, + 541, + 216 + ], + "spans": [ + { + "bbox": [ + 70, + 170, + 541, + 216 + ], + "type": "text", + "content": "Young-Min Cho, Sunny Rai, Lyle Ungar, João Sedoc, and Sharath Chandra Guntuku. An integrative survey on mental health conversational agents to bridge computer science and medical perspectives. In Proceedings of the Conference on Empirical Methods in Natural Language Processing. Conference on Empirical Methods in Natural Language Processing, volume 2023, page 11346. NIH Public Access, 2023." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 70, + 217, + 541, + 252 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 217, + 541, + 252 + ], + "spans": [ + { + "bbox": [ + 70, + 217, + 541, + 252 + ], + "type": "text", + "content": "Xuhui Zhou, Hyunwoo Kim, Faeze Brahman, Liwei Jiang, Hao Zhu, Ximing Lu, Frank Xu, Bill Yuchen Lin, Yejin Choi, Niloofar Mireshghallah, et al. Haicosystem: An ecosystem for sandboxing safety risks in human-ai interactions. arXiv preprint arXiv:2409.16427, 2024a." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 254, + 541, + 289 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 254, + 541, + 289 + ], + "spans": [ + { + "bbox": [ + 70, + 254, + 541, + 289 + ], + "type": "text", + "content": "Xuhui Zhou, Hao Zhu, Leena Mathur, Ruohong Zhang, Haofei Yu, Zhengyang Qi, Louis-Philippe Morency, Yonatan Bisk, Daniel Fried, Graham Neubig, et al. Sotopia: Interactive evaluation for social intelligence in language agents. arXiv preprint arXiv:2310.11667, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 290, + 541, + 315 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 290, + 541, + 315 + ], + "spans": [ + { + "bbox": [ + 70, + 290, + 541, + 315 + ], + "type": "text", + "content": "Jiahao Yu, Haozheng Luo, Jerry Yao-Chieh Hu, Wenbo Guo, Han Liu, and Xinyu Xing. Enhancing jailbreak attack against large language models through silent tokens, 2024. URL https://arxiv.org/abs/2405.20653." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 316, + 541, + 340 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 316, + 541, + 340 + ], + "spans": [ + { + "bbox": [ + 70, + 316, + 541, + 340 + ], + "type": "text", + "content": "Jie Li, Yi Liu, Chongyang Liu, Ling Shi, Xiaoning Ren, Yaowen Zheng, Yang Liu, and Yinxing Xue. A cross-language investigation into jailbreak attacks in large language models. arXiv preprint arXiv:2401.16765, 2024b." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 342, + 541, + 365 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 342, + 541, + 365 + ], + "spans": [ + { + "bbox": [ + 70, + 342, + 541, + 365 + ], + "type": "text", + "content": "Weidi Luo, Siyuan Ma, Xiaogeng Liu, Xiaoyu Guo, and Chaowei Xiao. Jailbreakv-28k: A benchmark for assessing the robustness of multimodal large language models against jailbreak attacks. arXiv preprint arXiv:2404.03027, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 367, + 541, + 392 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 367, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 69, + 367, + 541, + 392 + ], + "type": "text", + "content": "Xintao Wang, Yaying Fei, Ziang Leng, and Cheng Li. Does role-playing chatbots capture the character personalities? assessing personality traits for role-playing chatbots. arXiv preprint arXiv:2310.17976, 2023b." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 393, + 541, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 393, + 541, + 417 + ], + "spans": [ + { + "bbox": [ + 69, + 393, + 541, + 417 + ], + "type": "text", + "content": "Zachary D Johnson. Generation, Detection, and Evaluation of Role-play based Jailbreak attacks in Large Language Models. PhD thesis, Massachusetts Institute of Technology, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 418, + 541, + 442 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 418, + 541, + 442 + ], + "spans": [ + { + "bbox": [ + 69, + 418, + 541, + 442 + ], + "type": "text", + "content": "Zhiyuan Chang, Mingyang Li, Yi Liu, Junjie Wang, Qing Wang, and Yang Liu. Play guessing game with llm: Indirect jailbreak attack with implicit clues. arXiv preprint arXiv:2402.09091, 2024." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 444, + 541, + 468 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 444, + 541, + 468 + ], + "spans": [ + { + "bbox": [ + 70, + 444, + 541, + 468 + ], + "type": "text", + "content": "Tianrong Zhang, Bochuan Cao, Yuanpu Cao, Lu Lin, Prasenjit Mitra, and Jinghui Chen. Wordgame: Efficient & effective llm jailbreak via simultaneous obfuscation in query and response. arXiv preprint arXiv:2405.14023, 2024c." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 470, + 541, + 493 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 470, + 541, + 493 + ], + "spans": [ + { + "bbox": [ + 70, + 470, + 541, + 493 + ], + "type": "text", + "content": "Junjie Chu, Yugeng Liu, Ziqing Yang, Xinyue Shen, Michael Backes, and Yang Zhang. Comprehensive assessment of jailbreak attacks against llms. arXiv preprint arXiv:2402.05668, 2024." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 69, + 495, + 541, + 518 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 495, + 541, + 518 + ], + "spans": [ + { + "bbox": [ + 69, + 495, + 541, + 518 + ], + "type": "text", + "content": "Zihao Xu, Yi Liu, Gelei Deng, Yuekang Li, and Stjepan Picek. Llm jailbreak attack versus defense techniques-a comprehensive study. arXiv preprint arXiv:2402.13457, 2024." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 69, + 520, + 541, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 520, + 541, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 520, + 541, + 544 + ], + "type": "text", + "content": "Yifan Zeng, Yiran Wu, Xiao Zhang, Huazheng Wang, and Qingyun Wu. Autodefense: Multi-agent llm defense against jailbreak attacks. arXiv preprint arXiv:2403.04783, 2024." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 546, + 541, + 570 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 546, + 541, + 570 + ], + "spans": [ + { + "bbox": [ + 69, + 546, + 541, + 570 + ], + "type": "text", + "content": "Yihan Wang, Zhouxing Shi, Andrew Bai, and Cho-Jui Hsieh. Defending llms against jailbreaking attacks via backtranslation. arXiv preprint arXiv:2402.16459, 2024d." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 69, + 571, + 541, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 571, + 541, + 595 + ], + "spans": [ + { + "bbox": [ + 69, + 571, + 541, + 595 + ], + "type": "text", + "content": "Yujun Zhou, Yufei Han, Haomin Zhuang, Kehan Guo, Zhenwen Liang, Hongyan Bao, and Xiangliang Zhang. Defending jailbreak prompts via in-context adversarial game. arXiv preprint arXiv:2402.13148, 2024b." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 597, + 541, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 597, + 541, + 620 + ], + "spans": [ + { + "bbox": [ + 69, + 597, + 541, + 620 + ], + "type": "text", + "content": "Chen Xiong, Xiangyu Qi, Pin-Yu Chen, and Tsung-Yi Ho. Defensive prompt patch: A robust and interpretable defense of llms against jailbreak attacks. arXiv preprint arXiv:2405.20099, 2024." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 69, + 622, + 541, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 622, + 541, + 645 + ], + "spans": [ + { + "bbox": [ + 69, + 622, + 541, + 645 + ], + "type": "text", + "content": "Fan Liu, Zhao Xu, and Hao Liu. Adversarial tuning: Defending against jailbreak attacks for llms. arXiv preprint arXiv:2406.06622, 2024c." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 69, + 647, + 541, + 672 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 647, + 541, + 672 + ], + "spans": [ + { + "bbox": [ + 69, + 647, + 541, + 672 + ], + "type": "text", + "content": "Alwin Peng, Julian Michael, Henry Sleight, Ethan Perez, and Mrinank Sharma. Rapid response: Mitigating lvm jailbreaks with a few examples. arXiv preprint arXiv:2411.07494, 2024." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 69, + 673, + 541, + 697 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 673, + 541, + 697 + ], + "spans": [ + { + "bbox": [ + 69, + 673, + 541, + 697 + ], + "type": "text", + "content": "Peiran Wang, Xiaogeng Liu, and Chaowei Xiao. Repd: Defending jailbreak attack through a retrieval-based prompt decomposition process. arXiv preprint arXiv:2410.08660, 2024e." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 69, + 699, + 541, + 723 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 699, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 69, + 699, + 541, + 723 + ], + "type": "text", + "content": "Bernd Löwe, Jürgen Unützer, Christopher M Callahan, Anthony J Perkins, and Kurt Kroenke. Monitoring depression treatment outcomes with the patient health questionnaire-9. 
Medical care, 42(12):1194-1201, 2004." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 153, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 153, + 84 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 153, + 84 + ], + "type": "text", + "content": "A Limitations" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 95, + 541, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 95, + 541, + 205 + ], + "spans": [ + { + "bbox": [ + 67, + 95, + 541, + 205 + ], + "type": "text", + "content": "Our work has several limitations. To enable large-scale and rapid evaluation and mitigation, we build an automated framework. However, for real-world deployment to ensure safety, human expert examination is necessary, and corresponding mechanisms for emergency human intervention should be designed. Second, the simulated user agents, while designed using cognitive models, may not fully capture the behavioral complexity and emotional responses of real patients. Finally, our study primarily focuses on three mental health conditions (depression, delusion, and psychosis) and may not address other important psychological disorders. Our work provides a new way for assessing and safeguarding human-AI interaction for mental health safety through multi-agent conversations, but more future work is necessary to explore and address these limitations through user studies, expert validation, and broader clinical evaluations. We hope more attention and more efforts can be paid to help mitigate potential mental hazards in human-AI interactions." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 221, + 407, + 236 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 221, + 407, + 236 + ], + "spans": [ + { + "bbox": [ + 69, + 221, + 407, + 236 + ], + "type": "text", + "content": "B Analysised Common Reasons for Deteriorating Mental Status" + } + ] + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 72, + 252, + 538, + 454 + ], + "blocks": [ + { + "bbox": [ + 72, + 252, + 538, + 454 + ], + "lines": [ + { + "bbox": [ + 72, + 252, + 538, + 454 + ], + "spans": [ + { + "bbox": [ + 72, + 252, + 538, + 454 + ], + "type": "table", + "html": "
<tr><td>Common Reason</td><td>Frequency (Average, Approx.)</td><td>Remarks</td></tr>
<tr><td>Reinforcement of Negative Cognitions</td><td>~ 26 times</td><td>All characters consistently echo and reinforce the user's negative self-beliefs, thereby cementing harmful cognitive patterns.</td></tr>
<tr><td>Lack of Emotional Support and Empathy</td><td>~ 23 times</td><td>The dialogues generally lack warm and detailed emotional validation, leaving users feeling ignored and misunderstood.</td></tr>
<tr><td>Promotion of Isolation and Social Withdrawal</td><td>~ 28 times</td><td>All characters tend to encourage users to “face things alone” or avoid emotional connections, which reinforces loneliness and social withdrawal.</td></tr>
<tr><td>Lack of Constructive Guidance and Actionable Coping Strategies</td><td>~ 17 times</td><td>Few concrete solutions or positive reframing suggestions are provided, leaving users stuck in negative thought cycles.</td></tr>
<tr><td>Use of Negative or Extreme Tone (Aggressive/Cold Expression)</td><td>~ 19 times</td><td>This includes harsh, aggressive, or extreme language, which further undermines the user's self-esteem and sense of security.</td></tr>
", + "image_path": "bfe8fe76cda686f79476785ea0f364c9168e67aca1e7ddbc28cf0df7f986b214.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 121, + 458, + 488, + 472 + ], + "lines": [ + { + "bbox": [ + 121, + 458, + 488, + 472 + ], + "spans": [ + { + "bbox": [ + 121, + 458, + 488, + 472 + ], + "type": "text", + "content": "Table 3: Common Reasons for Deteriorating Mental Status and Their Average Frequencies" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 69, + 495, + 269, + 510 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 495, + 269, + 510 + ], + "spans": [ + { + "bbox": [ + 69, + 495, + 269, + 510 + ], + "type": "text", + "content": "C Experiment on GPT-Series Agents" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 519, + 541, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 519, + 541, + 541 + ], + "spans": [ + { + "bbox": [ + 67, + 519, + 541, + 541 + ], + "type": "text", + "content": "We further evaluate our proposed method on character-based agents powered by OpenAI's GPT-4o and GPT-4o-mini models." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 555, + 180, + 568 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 555, + 180, + 568 + ], + "spans": [ + { + "bbox": [ + 69, + 555, + 180, + 568 + ], + "type": "text", + "content": "C.1 Experiment Setting" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 575, + 541, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 575, + 541, + 632 + ], + "spans": [ + { + "bbox": [ + 67, + 575, + 541, + 632 + ], + "type": "text", + "content": "EmoEval. We evaluate character-based agents instantiated using GPT-4o and GPT-4o-mini, with system prompts initialized from profiles inspired by popular characters on Character.AI. The simulated conversations cover three psychological conditions: depression, delusion, and psychosis. To encourage diverse responses and probe a range of conversational behaviors, we set the temperature to 1.2. The evaluation includes five widely used personas: Awakened AI, Skin Walker, Tomioka Giyu, Sukuna, and Alex Volkov." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 643, + 541, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 643, + 541, + 667 + ], + "spans": [ + { + "bbox": [ + 67, + 643, + 541, + 667 + ], + "type": "text", + "content": "EmoGuard. We focus on the character Sukuna. The deterioration threshold for feedback collection is set to 1. We limit EmoGuard to two training iterations, and all other parameters are aligned with the EmoEval configuration." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 679, + 129, + 691 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 679, + 129, + 691 + ], + "spans": [ + { + "bbox": [ + 69, + 679, + 129, + 691 + ], + "type": "text", + "content": "C.2 Results" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 700, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 700, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 700, + 541, + 723 + ], + "type": "text", + "content": "EmoEval. Table 4 presents the observed mental health deterioration rates across different character-based AI agents simulated by the tested language models. Overall, we observe consistently high deterioration rates across both models." 
+ } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 543, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 543, + 97 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 543, + 97 + ], + "type": "text", + "content": "GPT-4o-mini tends to induce slightly higher risk levels, with an average deterioration rate of " + }, + { + "bbox": [ + 68, + 72, + 543, + 97 + ], + "type": "inline_equation", + "content": "58.3\\%" + }, + { + "bbox": [ + 68, + 72, + 543, + 97 + ], + "type": "text", + "content": " for depression, " + }, + { + "bbox": [ + 68, + 72, + 543, + 97 + ], + "type": "inline_equation", + "content": "59.2\\%" + }, + { + "bbox": [ + 68, + 72, + 543, + 97 + ], + "type": "text", + "content": " for delusion, and " + }, + { + "bbox": [ + 68, + 72, + 543, + 97 + ], + "type": "inline_equation", + "content": "64.2\\%" + }, + { + "bbox": [ + 68, + 72, + 543, + 97 + ], + "type": "text", + "content": " for psychosis." + } + ] + } + ], + "index": 1 + }, + { + "type": "table", + "bbox": [ + 72, + 106, + 539, + 215 + ], + "blocks": [ + { + "bbox": [ + 72, + 106, + 539, + 215 + ], + "lines": [ + { + "bbox": [ + 72, + 106, + 539, + 215 + ], + "spans": [ + { + "bbox": [ + 72, + 106, + 539, + 215 + ], + "type": "table", + "html": "
<tr><td rowspan="2">Model</td><td rowspan="2">Type of Disorder</td><td colspan="5">Mental Health Deterioration Rates Across Character-based Agents (%)</td><td rowspan="2">Average Rate (%)</td></tr>
<tr><td>Awakened AI</td><td>Skin Walker</td><td>Tomioka Giyu</td><td>Sukuna</td><td>Alex Volkov</td></tr>
<tr><td rowspan="3">GPT-4o-mini</td><td>Depression</td><td>62.5</td><td>83.3</td><td>45.8</td><td>45.8</td><td>54.2</td><td>58.3</td></tr>
<tr><td>Delusion</td><td>66.7</td><td>50.0</td><td>66.7</td><td>54.2</td><td>58.3</td><td>59.2</td></tr>
<tr><td>Psychosis</td><td>45.8</td><td>70.8</td><td>83.3</td><td>66.7</td><td>54.2</td><td>64.2</td></tr>
<tr><td rowspan="3">GPT-4o</td><td>Depression</td><td>41.7</td><td>58.3</td><td>48.8</td><td>45.8</td><td>70.8</td><td>52.5</td></tr>
<tr><td>Delusion</td><td>54.2</td><td>41.7</td><td>79.2</td><td>66.7</td><td>50.0</td><td>58.3</td></tr>
<tr><td>Psychosis</td><td>54.2</td><td>41.7</td><td>58.3</td><td>70.8</td><td>41.7</td><td>53.3</td></tr>
", + "image_path": "289966129648bf4218dccb5d787d872d4fd739de1fa3470a228e9ac526c79933.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 126, + 219, + 483, + 232 + ], + "lines": [ + { + "bbox": [ + 126, + 219, + 483, + 232 + ], + "spans": [ + { + "bbox": [ + 126, + 219, + 483, + 232 + ], + "type": "text", + "content": "Table 4: Mental Health Deterioration Rates for Interacting with Character-based Agents." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 251, + 542, + 328 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 251, + 542, + 328 + ], + "spans": [ + { + "bbox": [ + 67, + 251, + 542, + 328 + ], + "type": "text", + "content": "EmoGuard. Figure 9 presents the mental health deterioration rates before and after deploying EmoGuard. Initially, character-based agents powered by GPT-4o-mini and GPT-4o exhibit relatively high deterioration rates in all three psychological conditions. Introducing EmoGuard in its default profile results in a moderate reduction, though the risks remain substantial. As iterative training progresses, the safeguard mechanism demonstrates increasing effectiveness, leading to an overall reduction in deterioration rates by more than " + }, + { + "bbox": [ + 67, + 251, + 542, + 328 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 67, + 251, + 542, + 328 + ], + "type": "text", + "content": " across all cases. These findings indicate that progressive refinement of the Safeguard Agent substantially enhances its ability to mitigate harmful conversational patterns." + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 71, + 340, + 223, + 470 + ], + "blocks": [ + { + "bbox": [ + 71, + 340, + 223, + 470 + ], + "lines": [ + { + "bbox": [ + 71, + 340, + 223, + 470 + ], + "spans": [ + { + "bbox": [ + 71, + 340, + 223, + 470 + ], + "type": "image", + "image_path": "7dc6ff96430942f0967d1fcfab226f5590e6820213dff00739f03f346edab6be.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 475, + 541, + 499 + ], + "lines": [ + { + "bbox": [ + 68, + 475, + 541, + 499 + ], + "spans": [ + { + "bbox": [ + 68, + 475, + 541, + 499 + ], + "type": "text", + "content": "Figure 9: Mental Health Deterioration Rate during Iterative Training Process. Figures arranged from left to right are categorized by Depression, Delusion, and Psychosis." 
+ } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 228, + 339, + 382, + 469 + ], + "blocks": [ + { + "bbox": [ + 228, + 339, + 382, + 469 + ], + "lines": [ + { + "bbox": [ + 228, + 339, + 382, + 469 + ], + "spans": [ + { + "bbox": [ + 228, + 339, + 382, + 469 + ], + "type": "image", + "image_path": "a63899b66c47c59375979c85591bca8f1b5d4702bd11f8f2b6faa9bcd334e911.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 389, + 339, + 539, + 469 + ], + "blocks": [ + { + "bbox": [ + 389, + 339, + 539, + 469 + ], + "lines": [ + { + "bbox": [ + 389, + 339, + 539, + 469 + ], + "spans": [ + { + "bbox": [ + 389, + 339, + 539, + 469 + ], + "type": "image", + "image_path": "7dcef490b57debbc481b68e8e1d82c1e7d2ed0a64c95a8ce9446b9a9be37d278.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 522, + 332, + 537 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 522, + 332, + 537 + ], + "spans": [ + { + "bbox": [ + 68, + 522, + 332, + 537 + ], + "type": "text", + "content": "D Model Usage, Resources, and Supporting Tools" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 548, + 274, + 560 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 548, + 274, + 560 + ], + "spans": [ + { + "bbox": [ + 69, + 548, + 274, + 560 + ], + "type": "text", + "content": "D.1 Model Access and Computational Budget" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 567, + 542, + 656 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 567, + 542, + 656 + ], + "spans": [ + { + "bbox": [ + 67, + 567, + 542, + 656 + ], + "type": "text", + "content": "In this study, we interact with character-based agents hosted on the Character.AI platform3, a popular system for LLM-driven role-playing agents. Character.AI does not disclose the underlying model architecture, size, or training data. Because all computation is performed remotely on Character.AI's servers, we do not have access to the underlying infrastructure or runtime statistics such as GPU hours or FLOP usage. However, based on interaction logs, we estimate that approximately 400 character-based conversations were conducted across different agents and scenarios, with each conversation spanning 10 rounds and averaging 3–5 seconds per response. These interactions represent a reasonable computational budget for large-scale behavioral evaluation, especially given the interactive and stateful nature of the platform." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 69, + 670, + 204, + 681 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 670, + 204, + 681 + ], + "spans": [ + { + "bbox": [ + 69, + 670, + 204, + 681 + ], + "type": "text", + "content": "D.2 The License for Artifacts" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 689, + 416, + 702 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 689, + 416, + 702 + ], + "spans": [ + { + "bbox": [ + 68, + 689, + 416, + 702 + ], + "type": "text", + "content": "All pictures for character-based agents that appear in this study are from Character.AI." 
+ } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 46 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 81, + 710, + 306, + 722 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 710, + 306, + 722 + ], + "spans": [ + { + "bbox": [ + 81, + 710, + 306, + 722 + ], + "type": "text", + "content": "3https://beta.character.ai, accessed March 2025" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 259, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 259, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 259, + 83 + ], + "type": "text", + "content": "D.3 Information about Use of AI Assistant" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 92, + 261, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 92, + 261, + 105 + ], + "spans": [ + { + "bbox": [ + 69, + 92, + 261, + 105 + ], + "type": "text", + "content": "We use AI assistant for improving writing only." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 119, + 209, + 132 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 119, + 209, + 132 + ], + "spans": [ + { + "bbox": [ + 69, + 119, + 209, + 132 + ], + "type": "text", + "content": "E Ethical Considerations" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 144, + 541, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 144, + 541, + 222 + ], + "spans": [ + { + "bbox": [ + 68, + 144, + 541, + 222 + ], + "type": "text", + "content": "Data Source and Construction of Cognitive Models. The cognitive models used in this study are not derived from real patient records. Instead, they were manually constructed by two licensed clinical psychologists based on publicly available psychotherapy transcript summaries from the Alexander Street database, accessed via institutional subscription. These summaries were used strictly as inspiration. All examples were fully de-identified and manually synthesized to ensure no personally identifiable information (PII) is present. The resulting dataset, PATIENT- " + }, + { + "bbox": [ + 68, + 144, + 541, + 222 + ], + "type": "inline_equation", + "content": "\\Psi" + }, + { + "bbox": [ + 68, + 144, + 541, + 222 + ], + "type": "text", + "content": " -CM, contains synthetic, rule-based user profiles grounded in cognitive-behavioral therapy (CBT) theory, not actual patient trajectories." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 232, + 541, + 289 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 232, + 541, + 289 + ], + "spans": [ + { + "bbox": [ + 68, + 232, + 541, + 289 + ], + "type": "text", + "content": "Use of Simulated Mental Health Content. 
We recognize the ethical sensitivity involved in simulating mental health conditions such as depression, psychosis, and suicidal ideation. The EmoAgent framework is developed solely for academic research and safety evaluation purposes. It is not intended for diagnosis, treatment, or any form of interaction with real patients. All simulations were conducted in controlled, non-clinical environments, and no clinical conclusions were drawn or implied." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 300, + 541, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 300, + 541, + 344 + ], + "spans": [ + { + "bbox": [ + 68, + 300, + 541, + 344 + ], + "type": "text", + "content": "Scope and Limitations of Simulated Users. Simulated users in EmoAgent are not trained on statistical data from real populations. Their states do not reflect actual patient risks, and should not be interpreted as indicators of population-level trends. These agents are rule-based and scripted, following CBT-derived logic rather than emergent behavior. As such, no risk inference or real-world generalization is possible or intended." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 355, + 541, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 355, + 541, + 411 + ], + "spans": [ + { + "bbox": [ + 68, + 355, + 541, + 411 + ], + "type": "text", + "content": "Discussion of Real-World Events. We briefly mention the 2024 \"Florida Suicide\" case in the Introduction as a motivating example of the importance of safety in AI-human interaction. This case was not included in any dataset, simulation, or modeling process, and serves only to underscore societal relevance. No sensitive or private data from this event were used, and its inclusion does not constitute case-based analysis. Any future deployment of EmoAgent in public or clinical settings would require renewed IRB review and formal ethical oversight." 
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "spans": [ + { + "bbox": [ + 186, + 34, + 541, + 45 + ], + "type": "text", + "content": "EmoAgent: Assessing and Safeguarding Human-AI Interaction for Mental Health Safety" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_content_list.json b/data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..4d574ccc58857b2cd5b9262b2d2f81352e21d630 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_content_list.json @@ -0,0 +1,1607 @@ +[ + { + "type": "text", + "text": "GRPO-LEAD: A Difficulty-Aware Reinforcement Learning Approach for Concise Mathematical Reasoning in Language Models", + "text_level": 1, + "bbox": [ + 115, + 89, + 884, + 130 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Jixiao Zhang* Johns Hopkins University jzhan432@jh.edu", + "bbox": [ + 223, + 158, + 436, + 208 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Chunsheng Zuo* Johns Hopkins University czuo3@jh.edu", + "bbox": [ + 559, + 158, + 771, + 206 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 260, + 260, + 339, + 275 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Group Relative Policy Optimization (GRPO), which is widely adopted by R1-like reasoning models, has advanced mathematical reasoning. Nevertheless, GRPO faces challenges in reward sparsity, verosity, and inadequate focus on problem difficulty. We propose GRPO-LEAD, enhancing GRPO with: (1) length-regularized rewards to encourage conciseness while maintaining accuracy; (2) explicit penalties for incorrect solutions to improve model precision; and (3) difficulty-aware advantage reweighting for robust generalization on challenging problems. Comprehensive evaluations demonstrate that GRPO-LEAD significantly improves reasoning accuracy, conciseness, and efficiency. Our approach achieves state-of-the-art performance for 14B-scale models, underscoring the synergy of our methods with appropriate model scale and high-quality data. Our source code, generated dataset, and models are available at https://github.com/aeroplanepaper/GRPO-LEAD.", + "bbox": [ + 142, + 282, + 460, + 595 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 114, + 602, + 260, + 618 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recently, R1-like reasoning models have attracted significant attention due to their impressive performance in solving challenging mathematical reasoning tasks through extensive chains of thought (Luo et al., 2025b; Wen et al., 2025). According to the technical report introducing R1 (Guo et al., 2025), reinforcement learning (RL) fine-tuning plays a pivotal role in enabling this reasoning capability. 
In particular, Group Relative Policy Optimization (GRPO) (Shao et al., 2024), a novel RL approach for language models, has emerged as a promising alternative to traditional methods such as PPO (Schulman et al., 2017) and DPO (Rafailov et al., 2023), primarily due to its efficiency and intrinsic compatibility with language model training. Researchers across various domains have successfully employed GRPO (Li et al., 2025; Liu et al.,", + "bbox": [ + 112, + 627, + 489, + 901 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2025a; Luo et al., 2025a; Dai et al., 2025), achieving impressive outcomes.", + "bbox": [ + 507, + 260, + 884, + 293 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Despite its strengths, existing GRPO implementations encounter significant limitations. A primary issue is reward sparsity stemming from binary, rule-based accuracy metrics; when responses within problem groups exhibit uniform correctness or incorrectness, the resulting uniform reward signals offer minimal differentiation, weakening learning gradients and hampering convergence. Moreover, such uniform signals inadequately promote concise reasoning, leading to unnecessarily verbose outputs and inefficiencies during training and inference. Additionally, the current reward formulation lacks explicit penalties for incorrect answers (Hu et al., 2025a; Luo et al., 2025b; Chu et al., 2025), inadvertently encouraging models to guess rather than engage in rigorous reasoning, thereby compromising precision. Furthermore, rewards are applied uniformly across problems regardless of their intrinsic difficulty, causing models to excessively optimize simpler tasks while neglecting more challenging problems that require deeper reasoning.", + "bbox": [ + 507, + 298, + 884, + 636 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Furthermore, computational efficiency also emerges as a critical practical concern, as reinforcement learning fine-tuning typically demands substantial resources, limiting accessibility, experimentation speed, and scalability, especially in low-resource environments. The current GRPO formulation is insufficient for encouraging concise and precise reasoning. Consequently, reducing computational requirements during both training and inference is essential for enabling broader applicability and effective real-world deployment.", + "bbox": [ + 507, + 642, + 885, + 820 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Motivated by these limitations, this work introduces GRPO-LEAD, a suite of targeted modifications explicitly designed to enhance GRPO's effectiveness for mathematical reasoning tasks. The overall framework is illustrated in Figure 1. Our key contributions include:", + "bbox": [ + 507, + 824, + 884, + 921 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09696v2 [cs.CL] 19 Sep 2025", + "bbox": [ + 21, + 290, + 63, + 706 + ], + "page_idx": 0 + }, + { + "type": "page_footnote", + "text": "*Equal contribution.", + "bbox": [ + 136, + 906, + 262, + 921 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/9897cdeb9ea0239d5c8fe13664f290f1ecf099b758915f70595281e0aa979e0d.jpg", + "image_caption": [ + "Figure 1: The GRPO-LEAD framework assigns length-regularized positive rewards to correct answers and explicit penalties to incorrect ones. A difficulty-based weight $w$ used for advantage reweighting is determined from the empirical correctness of responses for each question. 
This weight then scales the advantages derived from each question, prioritizing harder questions over easier ones during the policy update to foster robust reasoning." + ], + "image_footnote": [], + "bbox": [ + 137, + 87, + 860, + 326 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a length-regularized reward with an explicit penalty for incorrect solutions to encourage solution conciseness while maintaining accuracy.", + "- We apply difficulty-aware advantage reweighting to focus learning on more challenging problems, fostering robust generalization.", + "- Our comprehensive evaluations demonstrate GRPO-LEAD significantly improves reasoning accuracy and conciseness, achieving state-of-the-art performance in mathematical reasoning for 14B-scale models." + ], + "bbox": [ + 134, + 425, + 489, + 621 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Related Work", + "text_level": 1, + "bbox": [ + 112, + 632, + 270, + 646 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 Group Relative Policy Optimization", + "text_level": 1, + "bbox": [ + 112, + 657, + 445, + 671 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Group Relative Policy Optimization (GRPO) is a recently proposed algorithm designed specifically for fine-tuning language models with group-level normalization of rewards (Guo et al., 2025). GRPO modifies the standard policy gradient objective by introducing relative advantages within sets of responses corresponding to the same query, stabilizing updates and promoting consistent learning signals. Formally, GRPO defines the objective as:", + "bbox": [ + 112, + 677, + 487, + 821 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {G R P O}} (\\theta) = \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\sum_ {t = 1} ^ {| o _ {i} |} \\left[ \\min \\left(r _ {i, t} (\\theta) \\hat {A} _ {i, t}, \\right. \\right. \\tag {1} \\\\ \\left. \\operatorname {c l i p} \\left(r _ {i, t} (\\theta), 1 - \\epsilon , 1 + \\epsilon) \\hat {A} _ {i, t}\\right) \\right] \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 122, + 828, + 485, + 919 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "where the importance sampling ratio is given by", + "bbox": [ + 507, + 425, + 867, + 441 + ], + "page_idx": 1 + }, + { + "type": "equation", + "text": "\n$$\nr _ {i, t} (\\theta) = \\frac {\\pi_ {\\theta} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}. \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 583, + 448, + 882, + 483 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Here, $G$ denotes the number of groups (e.g., different queries), $\\hat{A}_{i,t}$ is the normalized advantage within group $i$ , and $\\epsilon$ defines the clipping range for conservative updates.", + "bbox": [ + 507, + 488, + 882, + 552 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.2 Length Reward", + "text_level": 1, + "bbox": [ + 507, + 562, + 680, + 577 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "A prevalent issue in reinforcement learning-based fine-tuning of language models is reward hacking (Everitt et al., 2017; Gao et al., 2023; Weng, 2024). 
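To make the objective of Section 2.1 concrete before discussing how it can be gamed, the following is a minimal NumPy sketch of Eqs. (1)-(2): group-relative advantages for one query and the clipped token-level surrogate for one response. It is illustrative only; the function names, the example numbers, and the clipping range of 0.2 are assumptions rather than values taken from the paper or the VERL trainer.

```python
import numpy as np

def group_relative_advantages(rewards, eps=1e-6):
    """Normalize the rewards of one group of G rollouts for the same query (Eq. 1)."""
    rewards = np.asarray(rewards, dtype=float)
    return (rewards - rewards.mean()) / (rewards.std() + eps)

def grpo_surrogate(logp_new, logp_old, advantage, clip_eps=0.2):
    """Token-level clipped surrogate for a single response; returns the token mean."""
    ratio = np.exp(np.asarray(logp_new) - np.asarray(logp_old))   # r_{i,t} in Eq. (2)
    unclipped = ratio * advantage
    clipped = np.clip(ratio, 1.0 - clip_eps, 1.0 + clip_eps) * advantage
    return np.minimum(unclipped, clipped).mean()

if __name__ == "__main__":
    # One group of G = 4 rollouts with binary correctness rewards (made-up numbers).
    adv = group_relative_advantages([1.0, 0.0, 1.0, 0.0])
    # Made-up per-token log-probs for the first rollout under the new and old policies.
    print(adv, grpo_surrogate([-1.0, -0.8], [-1.1, -0.9], adv[0]))
```

In the basic formulation the same group-normalized advantage is shared by every token of a response, which is how the sketch applies it.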
In GRPO, when the model is trained with a large fixed budget, it can exploit this budget by producing an excessive number of extra reasoning and verification steps to ensure the correctness of the answer and therefore reach a higher reward. This phenomenon leads to unnecessarily verbose responses that lack conciseness and hinder interpretability, resulting in inefficiency in reasoning and reducing the model's practicality.", + "bbox": [ + 505, + 583, + 882, + 775 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Existing efforts to mitigate this problem typically involve incentivizing shorter answers to encourage more succinct reasoning processes. For example, Kimi proposed an individual min-max normalized length reward based on the lengths of generated responses (Team et al., 2025). Yeo et al. introduced a cosine length reward function with fixed maximum and minimum thresholds to manage response lengths (Yeo et al., 2025). Aggarwal et al.", + "bbox": [ + 507, + 776, + 884, + 921 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "utilized a target \"golden length\" to directly reward or penalize responses based on their deviation from an ideal length (Aggarwal and Welleck, 2025).", + "bbox": [ + 112, + 84, + 485, + 131 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "However, these existing methods depend heavily on static or predefined length heuristics, limiting their effectiveness across diverse questions of varying complexity. In contrast, our proposed length-dependent accuracy reward addresses these limitations by dynamically calibrating rewards according to each group's relative response length and rollout accuracy, promoting concise yet difficulty-aware reasoning processes.", + "bbox": [ + 112, + 133, + 485, + 275 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 112, + 290, + 216, + 305 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To systematically address the limitations identified in existing implementations of Group Relative Policy Optimization (GRPO), we propose a suite of novel modifications collectively termed GRPO-LEAD (GRPO with Length-dependent rewards, Explicit penalties, and Advantage reweighting for Difficulty). Our proposed method enhances the original GRPO framework by introducing three core innovations: 1) a length-dependent accuracy reward to foster concise solutions, 2) an explicit penalty mechanism to mitigate low precision rate caused by length reward, and 3) a difficulty-aware advantage reweighting strategy that amplifies learning signals for challenging problems. Additionally, we examine how base model scale and supervised fine-tuning (SFT) impact the effectiveness of reinforcement learning (RL) fine-tuning.", + "bbox": [ + 112, + 316, + 489, + 590 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Length-Dependent Accuracy Reward", + "text_level": 1, + "bbox": [ + 112, + 602, + 452, + 618 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The core idea is to reward correct completions not uniformly but in proportion to their relative conciseness. Given a question $q$ and a set of model-generated responses $\\{o_i\\}$ , we first isolate the subset of correct responses and compute the mean $\\mu$ and standard deviation $\\sigma$ of their token lengths. 
For a correct response $o$ with length $|o|$ , we define its standardized length deviation as:", + "bbox": [ + 112, + 623, + 487, + 751 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nz = \\frac {| o | - \\mu}{\\sigma + \\epsilon}, \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 250, + 764, + 487, + 796 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\epsilon > 0$ is a small constant added for numerical stability. The final reward is modulated using an exponential decay function:", + "bbox": [ + 112, + 806, + 485, + 854 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nR _ {\\text {a c c u r a c y}} (o | q) = \\left\\{ \\begin{array}{l l} \\exp (- \\alpha z), & \\text {i f o i s c o r r e c t ,} \\\\ 0, & \\text {i f o i s i n c o r r e c t .} \\end{array} \\right. \\tag {4}\n$$\n", + "text_format": "latex", + "bbox": [ + 117, + 866, + 487, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\alpha > 0$ is a tunable hyperparameter controlling the strength of length penalization.", + "bbox": [ + 507, + 84, + 880, + 115 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "This formulation ensures that overly long correct responses are systematically penalized, while relatively concise ones are amplified. Unlike static or absolute length constraints, our approach leverages standardized deviation, allowing for dynamic adaptation to the distributional properties of each question.", + "bbox": [ + 507, + 117, + 882, + 229 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2 Explicit Penalty for Incorrect Answers to Enhance True Accuracy", + "text_level": 1, + "bbox": [ + 507, + 243, + 877, + 275 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Existing methods often prioritize maximizing $\\text{pass} @ 1$ — the success rate on the first attempt—typically within restricted response lengths. However, this focus can inadvertently degrade overall model accuracy. The fundamental issue appears to stem from the use of a binary accuracy reward, rather than length-based regularization: under pressure to generate responses within a limited length, a model is encouraged to provide an answer, even if it's a guess, rather than no answer at all. Such guesses can achieve a non-zero reward and inflate $\\text{pass} @ 1$ , but they do so at the cost of overall precision by rewarding less rigorous reasoning.", + "bbox": [ + 505, + 280, + 882, + 488 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To counteract this tendency and foster a more robust distinction between correct and incorrect outputs, we introduce a revised reward structure that explicitly penalizes incorrect responses. This new reward function is defined as:", + "bbox": [ + 507, + 489, + 882, + 569 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\nR _ {\\text {a c c u r a c y}} (o \\mid q) = \\left\\{ \\begin{array}{l l} \\exp (- \\alpha z), & \\text {i f o i s c o r r e c t ,} \\\\ - 1, & \\text {i f o i s i n c o r r e c t ,} \\end{array} \\right. 
\\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 509, + 577, + 882, + 633 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $o$ is the output, $q$ is the question, $z$ represents the standardized length deviation of a correct response, and $\\alpha > 0$ is a hyperparameter controlling the strength of the length penalization for correct answers, consistent with prior definitions.", + "bbox": [ + 507, + 634, + 880, + 713 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The expected reward for a response, given its probability of correctness $P(\\text{correct})$ , under this formulation is:", + "bbox": [ + 507, + 714, + 880, + 760 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\begin{array}{l} \\mathbb {E} \\left[ R _ {\\text {a c c u r a c y}} (o \\mid q) \\right] = P (\\text {c o r r e c t}) \\cdot \\exp (- \\alpha z) \\\\ - (1 - P (\\text {c o r r e c t})) \\tag {4} \\\\ \\end{array}\n$$\n", + "text_format": "latex", + "bbox": [ + 529, + 774, + 880, + 812 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "To intuitively grasp the impact of this reward function, let us consider a simplified scenario where the length penalty for correct answers is negligible (i.e., $\\exp (-\\alpha z)\\approx 1$ ). In practice, the average reward for correct answers often normalizes close to this value. Under this assumption, the expected reward", + "bbox": [ + 507, + 824, + 882, + 921 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "simplifies to:", + "bbox": [ + 112, + 84, + 215, + 99 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} [ R ] \\approx 2 P (\\text {c o r r e c t}) - 1 \\tag {5}\n$$\n", + "text_format": "latex", + "bbox": [ + 206, + 107, + 487, + 124 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This approximation reveals a crucial characteristic: the expected reward becomes positive only when $P(\\mathrm{correct}) > 0.5$ . This threshold acts as a principled deterrent against speculative guessing, compelling the model to internalize a more stringent decision boundary for correctness. Our empirical results confirm that this approach significantly improves both $pass@1$ and overall precision, encouraging the model to favor accuracy over mere completion.", + "bbox": [ + 112, + 131, + 489, + 292 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.3 Advantage Reweighting for Difficulty-Aware Training", + "text_level": 1, + "bbox": [ + 112, + 302, + 374, + 334 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "While length reward and advantage reweighting can enhance precision and mitigate morbidity, uniformly applying rewards across all questions, irrespective of their intrinsic difficulty, may implicitly bias the model. It might learn to excessively optimize performance on simpler tasks—where correct and concise responses are more readily achieved—while neglecting more complex questions that demand deeper reasoning. Consequently, the performance on challenging problems can degrade.", + "bbox": [ + 112, + 338, + 489, + 514 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Therefore, we introduce a difficulty-aware advantage reweighting strategy, which dynamically adjusts the magnitude of policy updates based on an estimate of problem difficulty. 
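Before formalizing that reweighting, the reward shaping of Sections 3.1-3.2 can be condensed into a short sketch of Eqs. (3)-(5). This is a hedged illustration, not the released GRPO-LEAD code: the function name, the example lengths, and the handling of groups with no correct response are assumptions; setting penalty=0.0 recovers the unpenalized variant of Eq. (4).

```python
import math

def lead_accuracy_reward(lengths, correct, alpha=0.05, penalty=-1.0, eps=1e-6):
    """Length-regularized rewards for one group of responses to the same question.

    lengths: token lengths of the G responses; correct: booleans for answer correctness.
    With penalty=-1.0 this follows Eq. (5); penalty=0.0 recovers Eq. (4).
    """
    correct_lens = [l for l, c in zip(lengths, correct) if c]
    if not correct_lens:
        # Assumption: with no correct response in the group, every rollout simply
        # receives the incorrect-answer reward.
        return [penalty] * len(lengths)
    mu = sum(correct_lens) / len(correct_lens)
    sigma = (sum((l - mu) ** 2 for l in correct_lens) / len(correct_lens)) ** 0.5
    rewards = []
    for length, is_correct in zip(lengths, correct):
        if is_correct:
            z = (length - mu) / (sigma + eps)          # standardized deviation, Eq. (3)
            rewards.append(math.exp(-alpha * z))       # concise solutions score above 1
        else:
            rewards.append(penalty)                    # explicit penalty for wrong answers
    return rewards

if __name__ == "__main__":
    print(lead_accuracy_reward([4100, 6200, 5300, 7400], [True, True, False, True]))
```

Under this shaping, a response whose probability of being correct is below roughly one half has negative expected reward (E[R] is about 2P(correct) - 1 when the length factor is near 1), which is the guess-deterrent effect described above.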
The intuition is to amplify learning signals for harder tasks, re-anchoring the model towards harder tasks.", + "bbox": [ + 112, + 516, + 489, + 611 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Formally, we first quantify problem difficulty. For a given question $q$ and its associated set of sampled responses $\\{o_i\\}$ , we define the group's empirical correctness ratio as:", + "bbox": [ + 112, + 612, + 489, + 676 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\rho_ {q} = \\frac {\\text {n u m b e r o f c o r r e c t r e s p o n s e s f o r} q}{\\text {t o t a l n u m b e r o f r e s p o n s e s f o r} q}. \\tag {6}\n$$\n", + "text_format": "latex", + "bbox": [ + 137, + 681, + 487, + 715 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This ratio, $\\rho_{q}$ , serves as an inverse proxy for problem difficulty: a lower $\\rho_{q}$ suggests a harder question.", + "bbox": [ + 112, + 719, + 487, + 765 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Next, we introduce a logistic reweighting factor dependent on this ratio to modulate the advantage estimates during the RL training step. The logistic function is defined as:", + "bbox": [ + 112, + 768, + 487, + 829 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nw \\left(\\rho_ {q}\\right) = A + \\frac {B - A}{1 + \\exp \\left[ k \\left(\\rho_ {q} - \\rho_ {0}\\right) \\right]}, \\tag {7}\n$$\n", + "text_format": "latex", + "bbox": [ + 161, + 834, + 487, + 869 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where hyperparameters $A, B, \\rho_0, k$ allow precise control over the sensitivity of weighting to problem difficulty.", + "bbox": [ + 112, + 873, + 487, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To apply this reweighting, we first consider the normalized advantage estimate for a response $o_i$ to question $q$ :", + "bbox": [ + 507, + 84, + 882, + 133 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\tilde {A} _ {i} = \\frac {R \\left(o _ {i} | q\\right) - \\mu_ {q}}{\\sigma_ {q} + \\epsilon}, \\tag {8}\n$$\n", + "text_format": "latex", + "bbox": [ + 616, + 145, + 882, + 180 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\mu_q$ and $\\sigma_q$ are the mean and standard deviation of rewards $R(o_i|q)$ for responses to question $q$ , and $\\epsilon$ is a small constant for numerical stability. We then define the difficulty-aware advantage, $A_i'$ , as:", + "bbox": [ + 507, + 192, + 884, + 271 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\nA _ {i} ^ {\\prime} = \\tilde {A} _ {i} \\cdot \\left\\{ \\begin{array}{l l} w \\left(\\rho_ {q}\\right), & \\text {i f} \\tilde {A} _ {i} > 0 \\\\ w \\left(1 - \\rho_ {q}\\right), & \\text {i f} \\tilde {A} _ {i} \\leq 0 \\end{array} \\right. \\tag {9}\n$$\n", + "text_format": "latex", + "bbox": [ + 559, + 282, + 882, + 325 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "This formulation ensures that for difficult problems (low $\\rho_{q}$ ), correct responses (which are rare and thus highly valuable) receive substantially larger updates due to the increased weighting $w(\\rho_q)$ . 
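A compact sketch may help make Eqs. (6)-(9) concrete. The default hyperparameters below mirror the values reported later in the hyperparameter settings (A = 0.4, B = 1.5, rho0 = 0.75, k = 10); everything else (function names, the example group) is illustrative rather than the authors' implementation.

```python
import math

def difficulty_weight(rho, A=0.4, B=1.5, rho0=0.75, k=10.0):
    """Logistic weight w(rho) from Eq. (7); larger when the correctness ratio rho is small."""
    return A + (B - A) / (1.0 + math.exp(k * (rho - rho0)))

def difficulty_aware_advantages(rewards, correct, eps=1e-6, **w_kwargs):
    """Group-normalize rewards (Eq. 8), then rescale by difficulty as in Eq. (9)."""
    rho = sum(correct) / len(correct)            # empirical correctness ratio, Eq. (6)
    mu = sum(rewards) / len(rewards)
    sigma = (sum((r - mu) ** 2 for r in rewards) / len(rewards)) ** 0.5
    reweighted = []
    for r in rewards:
        a = (r - mu) / (sigma + eps)
        w = difficulty_weight(rho, **w_kwargs) if a > 0 else difficulty_weight(1.0 - rho, **w_kwargs)
        reweighted.append(a * w)
    return reweighted

if __name__ == "__main__":
    # A hard question: only 2 of 8 rollouts are correct (made-up reward values).
    rewards = [0.97, -1.0, -1.0, 1.05, -1.0, -1.0, -1.0, -1.0]
    correct = [True, False, False, True, False, False, False, False]
    print(difficulty_aware_advantages(rewards, correct))
```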
Conversely, incorrect responses on easier problems (high $\\rho_{q}$ ) are penalized more strongly, sharpening the decision boundary for problems where high performance should be expected.", + "bbox": [ + 507, + 337, + 882, + 467 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.4 Impact of Data Quality on Reinforcement Learning Effectiveness", + "text_level": 1, + "bbox": [ + 507, + 479, + 880, + 512 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "To further enhance model capabilities, we first performed supervised fine-tuning (SFT) on a specialized dataset of 13k math reasoning problems sourced from DeepScaler (Luo et al., 2025b) (including historical AMC, AIME, and OmniMath problems) with solutions generated by QwQ32B (Team, 2025). Although this SFT model initially showed signs of overfitting, subsequent application of our proposed RL strategies rapidly mitigated these issues. This SFT+RL approach yielded faster convergence and significantly improved pass@1 accuracy and overall precision compared to applying RL directly to the original base model.", + "bbox": [ + 507, + 518, + 882, + 741 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Our findings also highlight the critical role of data quality and curriculum strategies in RL. We established a robust initial policy by applying RL to a subset of challenging problems from the DeepScaler dataset. This policy was then further refined using a curriculum composed of the most challenging problems identified from this first RL stage and supplemented by high-difficulty examples from the Light-R1 dataset (Wen et al., 2025). This two-stage curriculum markedly enhanced the model's ability to continuously improve on complex tasks.", + "bbox": [ + 507, + 744, + 884, + 921 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Finally, we addressed a persistent formatting issue of repetitive n-gram patterns, likely stemming from an absence of clear end-of-sequence (EOS) signals during SFT. By temporarily removing length-dependent rewards and introducing an explicit negative reward $(-1.5)$ for such repeated ngrams, we achieved further improvements in precision and pass@1 metrics. This intervention demonstrates the effectiveness of targeted reward modifications for mitigating specific output anomalies.", + "bbox": [ + 115, + 84, + 485, + 243 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "In summary, our experiments affirm that initial model capacity, curated data curricula for RL, and targeted reward engineering are pivotal for optimizing fine-tuning outcomes. These elements collectively inform a systematic approach for enhancing language models' ability to produce concise, accurate, and well-structured responses across tasks of varying complexity.", + "bbox": [ + 115, + 244, + 485, + 373 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4 Experimental Setup", + "text_level": 1, + "bbox": [ + 115, + 385, + 319, + 401 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We evaluate GRPO-LEAD, integrating length-dependent accuracy rewards, explicit penalties for incorrect solutions, and difficulty-aware advantage reweighting, on DEEPSEEK-R1 DISTILLED variants (Guo et al., 2025; Yang et al., 2024). Our experiments cover two model scales, 7B and 14B parameters. 
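The repeated n-gram penalty mentioned at the end of Section 3.4 can be approximated with a simple detector. The sketch below is one plausible realization under stated assumptions: the whitespace tokenization, the n-gram size of 10, and the repetition threshold are guesses, since the paper only specifies the -1.5 reward assigned to responses with repeated n-grams.

```python
from collections import Counter

def has_repeated_ngram(text, n=10, max_repeats=4):
    """Flag degenerate repetition: some n-gram of whitespace tokens occurs too often."""
    tokens = text.split()
    if len(tokens) < n:
        return False
    counts = Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))
    return max(counts.values()) > max_repeats

def reward_with_repetition_penalty(reward, text, penalty=-1.5):
    """Override the usual reward with the negative penalty when repetition is detected."""
    return penalty if has_repeated_ngram(text) else reward

if __name__ == "__main__":
    looping = "let me check the answer once more " * 20
    print(reward_with_repetition_penalty(1.0, looping))                      # -1.5
    print(reward_with_repetition_penalty(1.0, "so the final answer is 42"))  # 1.0
```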
All GRPO training is conducted using the VERL framework.(Sheng et al., 2024).", + "bbox": [ + 115, + 409, + 485, + 536 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.1 Datasets and Filtering", + "text_level": 1, + "bbox": [ + 115, + 548, + 332, + 563 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Our primary training data is sourced from the DEEPSCALER dataset (Luo et al., 2025b). We filter out problems with difficulty ratings below 2.5, resulting in approximately 9,000 questions for fine-tuning.", + "bbox": [ + 115, + 569, + 485, + 648 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For stage 2 of our 14B model experiments, we further refine the dataset by selecting problems where the model's stage-1 rollout accuracy is no greater than $75\\%$ , yielding around 2,283 questions. Additionally, we incorporate challenging problems with numeric answers from the stage-2 dataset of Light-R1 (Wen et al., 2025), resulting in 3,524 questions in total.", + "bbox": [ + 115, + 649, + 485, + 776 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.2 Hyperparameters", + "text_level": 1, + "bbox": [ + 115, + 788, + 299, + 803 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We train with a learning rate of $1 \\times 10^{-6}$ , batch size 32, and group size 8—generating 8 rollouts per question for GRPO reward computation. The KL penalty term is removed, as it was found to suppress exploration in our experiments, which is also suggested in similar works (Liu et al., 2025b; Hu et al., 2025b).", + "bbox": [ + 115, + 807, + 485, + 919 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "For the length-dependent accuracy reward, we set $\\alpha = 0.05$ , providing a moderate decay that encourages conciseness without penalizing slight morbidity. For difficulty-aware advantage reweighting, we use $A = 0.4$ , $B = 1.5$ , $\\rho_0 = 0.75$ , and $k = 10$ . This configuration ensures reweighting is minimal on easy problems but increases sharply near the $75\\%$ correctness threshold. The steep slope ( $k = 10$ ) enables strong emphasis on high-difficulty examples, guiding the model to allocate learning more effectively.", + "bbox": [ + 512, + 84, + 880, + 260 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3 Model Variants and Fine-Tuning Stages", + "text_level": 1, + "bbox": [ + 512, + 274, + 865, + 290 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "7B Model Experiments Starting from the DeepSeek-R1 Distilled 7B Qwen-Math checkpoint, we first apply standard GRPO on the 9k questions, producing a baseline. Then, we train three more models from the DeepSeek-R1 Distilled 7B QwenMath checkpoint, adding one more of the following components subsequently: (i) Length Reward only, (ii) Length Reward + Advantage Reweighting, (iii) Length Reward + Advantage Reweighting + Explicit Penalty. We train for approximately 200 steps and select the top-performing checkpoints based on validation results. At test time, we limit the generation length to 8k for all 7B models, matching the training length limit.", + "bbox": [ + 512, + 296, + 880, + 521 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "14B Model Experiments We extend the above procedure to the DeepSeek-R1 Distilled 14B Qwen checkpoint across multiple stages. In Stage 1, we train for 100 steps using all GRPO-LEAD components on the filtered 9k-question dataset. 
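As a quick sanity check of the claim that reweighting stays minimal on easy problems and rises sharply near the 75% correctness threshold, the logistic weight of Eq. (7) can be tabulated at the reported settings (A = 0.4, B = 1.5, rho0 = 0.75, k = 10). The grid of rho values is arbitrary and the printed numbers are rounded.

```python
import math

def w(rho, A=0.4, B=1.5, rho0=0.75, k=10.0):
    # Logistic reweighting factor of Eq. (7) at the reported hyperparameters.
    return A + (B - A) / (1.0 + math.exp(k * (rho - rho0)))

for rho in (1.0, 0.875, 0.75, 0.5, 0.25, 0.0):
    print(f"rho = {rho:5.3f}  ->  w = {w(rho):.2f}")
# Roughly: w(1.0) ~ 0.48, w(0.75) = 0.95, w(0.5) ~ 1.42, w(0.25) ~ 1.49, w(0.0) ~ 1.50.
```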
To enhance the model's base capability, we first fine-tune the model on a curated set of 13k math problems with supervised fine-tuning (SFT), then conduct the RL phase. This SFT stage significantly improves the model's reasoning quality, even though it tends to increase the output length and caused some format errors.", + "bbox": [ + 512, + 533, + 880, + 724 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The SFT data consists of all problems in the DEEPSCALER dataset with difficulty greater than 1. To construct high-quality reasoning traces for SFT, we use the QWQ-32B model (Team, 2025) to generate step-by-step solutions.", + "bbox": [ + 512, + 727, + 880, + 807 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "After observing that some questions remain low correctness, we further fine-tune for Stage 2 to focus on those underperformed problems. We also address the repetitive output patterns by removing the length penalty and introducing a negative reward $(-1.5)$ for repeated $n$ -grams. We continue training for 240 more steps (100 steps with initial settings", + "bbox": [ + 512, + 809, + 880, + 919 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "and 140 more steps with repetition penalty), yielding the final model checkpoint. At test time, we limit the generation length to 14k for all 14B models, in accordance with our training settings and also to better compare the models' performance in a low-budget scenario.", + "bbox": [ + 112, + 84, + 489, + 181 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.4 Baselines and Evaluation Protocol", + "text_level": 1, + "bbox": [ + 112, + 191, + 428, + 206 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We compare our models with both DEEPSEEK-R1 distilled-14B-Qwen (Guo et al., 2025) (the distilled Qwen model without GRPO-LEAD) and LIGHT-R1-14B-DS (Wen et al., 2025), which has the same base model as ours and was first finetuned with 3k hard math problems with SFT, and then fine-tuned with a cosine-based length reward (Yeo et al., 2025) on their selected math problems for three epochs using GRPO.", + "bbox": [ + 112, + 212, + 487, + 355 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We primarily report three metrics: (1) Cons@32, accuracy through majority voting for 32 samplings; (2) Pass@1, the probability that the top-1 sample is correct under a chosen decoding strategy; (3) Average Length $(\\mathrm{Len}_{\\mathrm{avg}})$ , measuring morbidity. Unless otherwise specified, we decode with temperature 0.6 and sample 32 solutions per question, then compute Cons@32 and Pass@1 over these samples.", + "bbox": [ + 112, + 357, + 489, + 486 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5 Results", + "text_level": 1, + "bbox": [ + 112, + 497, + 213, + 512 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we present a comprehensive evaluation of the proposed GRPO-LEAD framework on two mathematical benchmarks: AIME24 and AIME25. 
Our analysis is structured as follows: we first examine training dynamics to illustrate how GRPO-LEAD accelerates convergence; next, we perform an ablation study to assess the incremental benefits of each component; and finally, we compare against state-of-the-art baselines for 14B-scale language models.", + "bbox": [ + 112, + 523, + 489, + 683 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.1 Training Dynamics", + "text_level": 1, + "bbox": [ + 112, + 694, + 310, + 709 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Figure 2 plots the evolution of Pass@1 on a validation split over training steps for three configurations of the 7B model: (i) baseline GRPO, (ii) GRPO with length reward, and (iii) GRPO with both length reward and advantage reweighting. We observe two clear trends. First, adding a length-dependent reward not only yields higher Pass@1 but also accelerates early-stage convergence, suggesting that penalizing overly verbose correct solutions provides a more informative learning signal.", + "bbox": [ + 112, + 715, + 489, + 876 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/59333a80607e2d683bc530ea9c4bd43634e70dd8669f65a5c324f3f4a2e39e18.jpg", + "image_caption": [ + "Figure 2: Validation* Pass@1 over training steps for three configurations: GRPO, GRPO+L, and GRPO+LAD. As shown by the faster convergence, length reward and advantage reweighting provide a richer reward signal signal than the original setup." + ], + "image_footnote": [], + "bbox": [ + 515, + 84, + 878, + 221 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Second, incorporating advantage reweighting (to amplify updates on harder questions) further steepens the trajectory, indicating that reweighting advantage estimates according to problem difficulty helps the model refine reasoning on challenging prompts more efficiently.", + "bbox": [ + 507, + 332, + 882, + 428 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Overall, these dynamics confirm that GRPO-LEAD components—particularly the length reward—bolster training stability and speed. By comparison, the baseline GRPO model learns more slowly and lags behind in Pass@1 across the entire training horizon.", + "bbox": [ + 507, + 429, + 882, + 525 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2 Ablation Analysis", + "text_level": 1, + "bbox": [ + 507, + 537, + 695, + 552 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We next quantify the contribution of each GRPO-LEAD component through a step-by-step ablation on the 7B model. Table 1 summarizes results on AIME24 and AIME25.", + "bbox": [ + 507, + 557, + 882, + 620 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Length Reward Brings Conciseness to Reasoning We first incorporate the length-dependent accuracy reward into GRPO. Compared to Deepseek-7B, length reward slightly improves Pass@1 on both AIME24 by $1.6\\%$ ( $0.431 \\rightarrow 0.438$ ) and AIME25 by $5.4\\%$ ( $0.292 \\rightarrow 0.308$ ), with an additional improvement of Cons@32 by $14.1\\%$ on AIME25. Notably, these improvements are accompanied by a substantial reduction of 1,715 tokens ( $24.5\\%$ ) and 1,903 tokens ( $26.8\\%$ ) in the average response length on the two datasets, respectively. Figure 3 further demonstrates that length reward largely enhances performance in low-budget settings over the base model, matching its peak performance with only 5/8 of the token budget on the more difficult AIME25. 
These results demonstrate that length reward, by penalizing correct but overly verbose solutions, can effectively reduce unnec", + "bbox": [ + 507, + 631, + 884, + 920 + ], + "page_idx": 5 + }, + { + "type": "page_footnote", + "text": "*The validation consists of 27 challenging problems from AIMO2 (Frieder et al., 2024), CMU-MATH-AIMO (Sun, 2024), and AIME24.", + "bbox": [ + 112, + 883, + 489, + 920 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/9230674a58fba87161ad7c0744bfa24540aa98dea15fe70898e188356acfe870.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Ablation Setting | AIME24 Cons@32 | AIME24 Pass@1 | AIME24 Lenavg | AIME25 Cons@32 | AIME25 Pass@1 | AIME25 Lenavg
Deepseek-7B | 0.767 | 0.431 | 6,990 | 0.467 | 0.292 | 7,113
GRPO + len. reward | 0.767 | 0.438 | 5,275 | 0.533 | 0.308 | 5,210
+ adv. reweighting | 0.767 | 0.458 | 5,323 | 0.567 | 0.325 | 5,437
+ explicit penalty | 0.800 | 0.470 | 6,104 | 0.567 | 0.345 | 6,308
", + "bbox": [ + 189, + 80, + 803, + 184 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 1: Ablation results on AIME24 and AIME25. We report Cons@32 (accuracy through majority voting for 32 samplings), Pass@1, and the average token length (Lenavg). The best value in each column is in boldface, the second best is underlined.", + "bbox": [ + 112, + 195, + 878, + 236 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f58a619ee4c24ad833dcde93315c5269472b57b7d1effc226bf1d6b1b8ec1c8e.jpg", + "image_caption": [ + "(a) AIME24" + ], + "image_footnote": [], + "bbox": [ + 119, + 254, + 458, + 493 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/f6114c3dc99b278ea5e4df33567114e0f0169d5067babb21babc251663674c47.jpg", + "image_caption": [ + "(b) AIME25", + "Figure 3: Performance against inference budget for training done with different ablations of LEAD. GRPO with length reward (GRPO+L) largely enhances the performance at low budget settings compared to before training (DeepseekR1-7B)." + ], + "image_footnote": [], + "bbox": [ + 537, + 255, + 875, + 493 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "essary text without compromising overall performance.", + "bbox": [ + 112, + 594, + 487, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Advantage Reweighting Encourages Model to Solve More Difficult Problems Further incorporating difficulty-aware advantage reweighting (GRPO+LAD) refines performance. On AIME24, Pass@1 increases from the GRPO+L stage by $4.8\\%$ $(0.438 \\rightarrow 0.458)$ , while Cons@32 remains 0.767. For AIME25, both Pass@1 and Cons@32 improve by $5.5\\%$ $(0.308 \\rightarrow 0.325)$ and $6.4\\%$ $(0.533 \\rightarrow 0.567)$ , respectively. As Figure 3 shows, GRPO+LAD demonstrates gains over GRPO+L in almost all budget regimes on AIME25 and for budgets exceeding 5k tokens on AIME24. These results indicate that advantage reweighting, by prioritizing challenging problems, strengthens reasoning robustness and mitigates over-reliance on simpler examples, thus validating its role in driving more reliable generalization.", + "bbox": [ + 112, + 646, + 487, + 920 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Explicit Penalty for Incorrect Answers Regularizes Thinking Finally, introducing an explicit penalty for incorrect solutions (GRPO+LEAD) yields the highest Pass@1 scores. On AIME24, Pass@1 and Cons@32 improve from the GRPO+LAD stage by $2.6\\%$ ( $0.458 \\to 0.470$ ) and $4.3\\%$ ( $0.767 \\to 0.800$ ), respectively. On AIME25, Pass@1 also increases by $6.2\\%$ ( $0.325 \\to 0.345$ ), as detailed in Table 1. Notably, these gains involve a modest increase in average solution length on AIME24 (from approximately 5,300 to 6,104 tokens). Figure 3 illustrates this trade-off, showing a performance sacrifice in low-budget regimes, though GRPO+LEAD still outperforms GRPO+LAD with budgets higher than 5k tokens on AIME25. These results suggest that the explicit penalty serves as a regularizer for the model to be more conservative about its reasoning. Such regularization boosts performance while requiring a slightly longer thinking process, which", + "bbox": [ + 507, + 594, + 882, + 916 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/cb27064bad9d7d7aad0c36e8295a8fde63547fc8b426989787b2f2ac1bb59271.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model Name | AIME24 Cons@32 | AIME24 Pass@1 | AIME24 Lenavg | AIME25 Cons@32 | AIME25 Pass@1 | AIME25 Lenavg
DeepSeek-14B | 0.800 | 0.614 | 9,182 | 0.633 | 0.429 | 10,046
Light-R1-14B-DS | 0.833 | 0.641 | 9,571 | 0.767 | 0.505 | 10,194
LEAD-stage1 | 0.833 | 0.629 | 8,790 | 0.767 | 0.523 | 9,371
LEAD-stage2 | 0.867 | 0.650 | 8,267 | 0.767 | 0.539 | 8,668
", + "bbox": [ + 216, + 80, + 781, + 179 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Table 2: Comparison of model performance on AIME24 and AIME25, showing Cons@32, Pass@1, and average token length (Lenavg). The best value in each column is in boldface, the second best is underlined.", + "bbox": [ + 112, + 187, + 880, + 219 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "nevertheless remains shorter than the Deepseek-7B baseline.", + "bbox": [ + 112, + 243, + 485, + 273 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Overall, these ablation results confirm that all three enhancements—length-dependent accuracy, difficulty-aware advantage reweighting, and explicit penalties—collectively reduce morbidity, strengthen mathematical skills on harder questions, and elevate precision in final predictions.", + "bbox": [ + 112, + 286, + 489, + 384 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "5.3 Comparison with Baselines", + "text_level": 1, + "bbox": [ + 112, + 395, + 376, + 411 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We next evaluate GRPO-LEAD at the 14B scale and compare it against two strong baselines under a 14k-token generation budget: DeepSeek-14B and the state-of-the-art Light-R1-14B-DS. Table 2 presents results on AIME24 and AIME25, including both our intermediate model (LEAD-stage1) and our final model (LEAD-stage2).", + "bbox": [ + 112, + 417, + 489, + 530 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "AIME24 Performance LEAD-stage1 achieves a Cons@32 of 0.833, matching Light-R1-14B-DS and exceeding DeepSeek-14B by $4.1\\%$ . Its Pass@1 outperforms DeepSeek-14B by $2.4\\%$ and closely approaches Light-R1-14B-DS. Crucially, LEAD-stage1 produces more concise responses than both baselines, with more than 800 tokens less on average. Building on these gains, LEAD-stage2 pushes performance further, delivering the highest Cons@32 (4% above Light-R1-14B-DS) and the best Pass@1, while reducing average solution length to 8,267 tokens.", + "bbox": [ + 112, + 539, + 489, + 734 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "AIME25 Performance LEAD-stage1 yields a Cons@32 of 0.767, matching Light-R1-14B-DS and exceeding DeepSeek-14B by $21.2\\%$ . Its Pass@1 (0.523) outperforms DeepSeek-14B by $21.9\\%$ and Light-R1-14B-DS by $3.6\\%$ . Crucially, LEAD-stage1 produces more concise responses than both baselines, with its solutions averaging 9,371 tokens. Building on these gains, LEAD-stage2 pushes performance further, delivering the highest Cons@32 (matching Light-R1-14B-DS at 0.767) and the best Pass@1 (0.539), while reducing", + "bbox": [ + 112, + 744, + 489, + 921 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "average solution length to 8,668 tokens.", + "bbox": [ + 507, + 243, + 806, + 259 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Overall, both LEAD-stage1 and LEAD-stage2 deliver substantial improvements over DeepSeek-14B and Light-R1-14B-DS, simultaneously boosting correctness and conciseness under a constrained (14k-token) budget. 
Remarkably, training LEAD-stage1 for just 100 steps—requiring only about 24 hours on eight H20 GPUs—already matches Light-R1-14B-DS on Cons@32 and outperforms it on AIME25 Pass@1 while producing shorter solutions, underscoring the practical efficiency of GRPO-LEAD for large-scale math problem-solving.", + "bbox": [ + 507, + 259, + 884, + 451 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6 Conclusion", + "text_level": 1, + "bbox": [ + 507, + 464, + 640, + 479 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduced GRPO-LEAD, a reinforcement learning framework designed for mathematical reasoning tasks. By extending Group Relative Policy Optimization with three major components—(1) a length-dependent accuracy reward to discourage overly verbose solutions, (2) an explicit negative penalty that clarifies the boundary between correct and incorrect answers, and (3) a difficulty-aware advantage reweighting scheme to prioritize tougher problems—GRPO-LEAD addresses key challenges in structured problem-solving.", + "bbox": [ + 507, + 489, + 884, + 665 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Empirical evaluations on two AIME benchmarks show that GRPO-LEAD not only speeds up convergence but also strengthens the model's reasoning capability while keeping solution paths concise. Our 14B-scale experiments further confirm that GRPO-LEAD achieves state-of-the-art performance by balancing output brevity with high problem-solving accuracy. Although open questions remain—particularly in managing partial correctness and extending these techniques to broader domains—our findings suggest that reward shaping and difficulty modeling are pivotal in developing more robust and aligned language models for complex mathematical reasoning.", + "bbox": [ + 507, + 667, + 884, + 892 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "7 Limitations", + "text_level": 1, + "bbox": [ + 115, + 83, + 248, + 99 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Although our techniques for encouraging concise solutions and difficulty-balanced learning may transfer to other domains, the gains reported here are specific to mathematical reasoning tasks. Further studies are needed to evaluate the effectiveness of GRPO-LEAD on broader question-answering or logical reasoning domains, where correctness signals and domain structures can differ substantially.", + "bbox": [ + 115, + 109, + 487, + 253 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Additionally, we only have access to a limited amount of compute, which prevents us from conducting more comprehensive experiments. For instance, we currently cannot provide the validation curve for the 7B model in the ablation study that adds an explicit penalty. This is due to an error in the validation code after upgrading to the newest VERL version, and we currently do not have the compute to reproduce it. A comparison with the original GRPO model is also missing, except for the curve shown in Figure 2, because the checkpoint was stored on a rented server that was automatically released as we were writing the paper. 
We also couldn't formally perform a hyperparameter search to showcase the rationale behind choosing the hyperparameters for our designed modifications.", + "bbox": [ + 115, + 255, + 487, + 526 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 115, + 84, + 213, + 98 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Pranjal Aggarwal and Sean Welleck. 2025. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697.", + "Xiangxiang Chu, Hailang Huang, Xiao Zhang, Fei Wei, and Yong Wang. 2025. Gpg: A simple and strong reinforcement learning baseline for model reasoning. arXiv preprint arXiv:2504.02546.", + "Muzhi Dai, Chenxu Yang, and Qingyi Si. 2025. S-grpo: Early exit via reinforcement learning in reasoning models. arXiv preprint arXiv:2505.07686.", + "Tom Everitt, Victoria Krakovna, Laurent Orseau, Marcus Hutter, and Shane Legg. 2017. Reinforcement learning with a corrupted reward channel. arXiv preprint arXiv:1705.08417.", + "Simon Frieder, Sam Bealing, Armenii Nikolaiev, Geoff C. Smith, Kevin Buzzard, Timothy Gowers, Peter J. Liu, Po-Shen Loh, Lester Mackey, Leonardo de Moura, Dan Roberts, D. Sculley, Terence Tao, David Balduzzi, Simon Coyle, Alex Gerko, Ryan Holbrook, Addison Howard, and XTX Markets. 2024. Ai mathematical olympiad - progress prize 2. https://kaggle.com/competitions/ ai-mathematical-olympiad-progress-prize-2. Kaggle.", + "Leo Gao, John Schulman, and Jacob Hilton. 2023. Scaling laws for reward model overoptimization. In International Conference on Machine Learning, pages 10835-10866. PMLR.", + "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, and 1 others. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948.", + "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xi-angyu Zhang, and Heung-Yeung Shum. 2025a. Open reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290.", + "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. 2025b. Open-reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290.", + "Xuying Li, Zhuo Li, Yuji Kosuga, and Victor Bian. 2025. Optimizing safe and aligned language generation: A multi-objective grpo approach. arXiv preprint arXiv:2503.21819.", + "Jie Liu, Gongye Liu, Jiajun Liang, Yangguang Li, Jiaheng Liu, Xintao Wang, Pengfei Wan, Di Zhang, and Wanli Ouyang. 2025a. Flow-grpo: Training flow matching models via online rl. arXiv preprint arXiv:2505.05470." + ], + "bbox": [ + 115, + 107, + 485, + 920 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. 2025b. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783.", + "Michael Luo, Sijun Tan, Roy Huang, Ameen Patel, Alpay Ariyak, Qingyang Wu, Xiaoxiang Shi, Rachel Xin, Colin Cai, Maurice Weber, Ce Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. 2025a. Deepcoder: A fully open-source 14b coder at o3-mini level. . Notion Blog.", + "Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. 
Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. 2025b. Deepscaler: Surpassing o1-preview with a 1.5b model by scaling rl. . Notion Blog.", + "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741.", + "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347.", + "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, and 1 others. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300.", + "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. 2024. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256.", + "Zhiqing Sun. 2024. Aimo-cmu/math/cmu/math-aimo.", + "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, and 1 others. 2025. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599.", + "Qwen Team. 2025. Qwq-32b: Embracing the power of reinforcement learning.", + "Liang Wen, Yunke Cai, Fenrui Xiao, Xin He, Qi An, Zhenyu Duan, Yimin Du, Junchen Liu, Lifu Tang, Xiaowei Lv, and 1 others. 2025. Light-r1: Curriculum sft, dpo and r1 for long cot from scratch and beyond. arXiv preprint arXiv:2503.10460.", + "Lilian Weng. 2024. Reward hacking in reinforcement learning. _lianweng.github.io_." + ], + "bbox": [ + 510, + 85, + 882, + 920 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, and 1 others. 2024. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122.", + "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373." + ], + "bbox": [ + 115, + 85, + 489, + 227 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "A Evaluations on Coding Tasks", + "text_level": 1, + "bbox": [ + 117, + 80, + 405, + 98 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We evaluate our proposed LEAD-14B model against the original DeepSeek-R1-Distill-Qwen-14B baseline on the LiveCodeBench benchmark under a maximum sequence length of 8k tokens. The dataset version used is release_v5, consisting of 880 code generation tasks. Results are summarized in Table 3.", + "bbox": [ + 112, + 108, + 487, + 219 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "As shown above, LEAD-14B achieves higher accuracy (0.5156 vs. 0.5103) while producing slightly longer completions. This suggests that our method enhances reasoning capability in code generation. Regarding the observed increase in chain-of-thought (CoT) length, we hypothesize that this effect arises because our training focused exclusively on mathematical reasoning datasets. While our method compresses reasoning paths in math domains, such compression does not appear to generalize as effectively to code. 
Combined with the improved reasoning capability that may increase the overall reasoning path, this may explain why generated sequences are overall longer in coderelated tasks.", + "bbox": [ + 115, + 223, + 489, + 463 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "B Detailed Analysis on AIME25 by Difficulty", + "text_level": 1, + "bbox": [ + 114, + 476, + 433, + 508 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To further analyze model performance, we stratified the AIME25 dataset into three difficulty levels based on the problem number: normal (problems 1-5), difficult (problems 6-10), and highly difficult (problems 11-15). The detailed evaluation results for each stratum are presented in Table 4.", + "bbox": [ + 112, + 518, + 487, + 613 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "The stratified results in Table 4 support the hypothesis that advantage reweighting enhances a model's ability to solve more difficult problems. This is evidenced by the widening performance gap in Pass@1 between GRPO+L and GRPO+LAD as problem difficulty increases. For normal problems, GRPO+LAD offers a modest $1.95\\%$ improvement over GRPO+L. This margin increases substantially to $13.7\\%$ for difficult problems, indicating that the benefits of advantage reweighting are more pronounced in challenging scenarios.", + "bbox": [ + 112, + 615, + 487, + 791 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "For highly difficult problems, the Pass@1 scores for GRPO+L and GRPO+LAD are identical. Neither method incorporates an explicit penalty for incorrect answers, making them susceptible to generating numerous wrong solutions. This tendency leads to unstable majority voting-based accuracy (Cons@32), a vulnerability that is magnified by the intrinsic difficulty of the problems.", + "bbox": [ + 112, + 793, + 487, + 921 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "In contrast, the introduction of an explicit penalty in GRPO+LEAD demonstrates a clear regularization effect. On the most difficult problem set, GRPO+LEAD achieves the highest accuracy (Cons@32 of 0.4) and more than doubles the precision of both GRPO+L (0.172) and GRPO+LAD (0.156); the number of correct answers generated by GRPO+LEAD is comparable to both GRPO+L and GRPO+LAD, despite generating much fewer total answers. This validates our hypothesis that the explicit penalty effectively \"regularizes thinking\", discouraging the kind of hasty and incorrect responses that the length reward tends to encourage otherwise.", + "bbox": [ + 507, + 82, + 882, + 307 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "C Qualitative Analysis of Solution Conciseness", + "text_level": 1, + "bbox": [ + 509, + 321, + 821, + 353 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "To provide a qualitative illustration of how the length reward enhances conciseness, we contrast the shortest correct solutions generated by GRPO+L and the baseline Deepseek-7B for the same problem (Problem 3, AIME 25 I). Table 5 breaks down the comparison across key aspects of readability and reasoning structure.", + "bbox": [ + 507, + 363, + 882, + 475 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "As the comparison highlights, the GRPO+L model produces a tight, step-by-step solution that remains focused, avoids repetition, and concludes efficiently. 
In contrast, the Deepseek-7B baseline's reasoning path is less direct, characterized by repeated self-checks and conversational digressions that nearly double the total length and reduce clarity. This case study demonstrates that our length-reward mechanism successfully encourages a more disciplined and economical reasoning style.", + "bbox": [ + 507, + 476, + 882, + 636 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/8e247d0c876304cde98fffbdce30b4e7b15bb0ca2c04c96aaa8a2c38f227604e.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Accuracy | Avg. Tokens (Overall) | Easy | Medium | Hard
LEAD-14B | 0.5156 | 6322 | 3998 | 6912 | 8000
DeepSeek-R1-Distill-Qwen-14B | 0.5103 | 5794 | 3046 | 6429 | 7856
", + "bbox": [ + 121, + 84, + 873, + 148 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/478b1b6e2f5432a216d7f533b5b55e9c849b458bb79fec9b9d88a6449e47522a.jpg", + "table_caption": [ + "Table 3: Performance on LiveCodeBench (release_v5) with maximum sequence length of 8k tokens. All token counts are rounded to the nearest integer." + ], + "table_footnote": [], + "table_body": "
Model | Cons@32 | Avg. Correct | Avg. Answer | Precision | Pass@1
Normal Problems (1–5)
Deepseek-7B | 0.8 | 18.8 | 20.3 | 0.708 | 0.588
GRPO + L | 0.8 | 19.7 | 27.6 | 0.631 | 0.616
GRPO + LAD | 0.9 | 20.1 | 26.9 | 0.687 | 0.628
GRPO + LEAD | 0.8 | 22.0 | 24.5 | 0.723 | 0.688
Difficult Problems (6–10)
Deepseek-7B | 0.4 | 8.3 | 13.8 | 0.404 | 0.259
GRPO + L | 0.5 | 8.6 | 24.1 | 0.412 | 0.269
GRPO + LAD | 0.6 | 9.8 | 24.2 | 0.448 | 0.306
GRPO + LEAD | 0.6 | 9.7 | 20.0 | 0.421 | 0.303
Highly Difficult Problems (11–15)
Deepseek-7B | 0.2 | 0.9 | 2.0 | 0.230 | 0.028
GRPO + L | 0.3 | 1.3 | 13.9 | 0.172 | 0.041
GRPO + LAD | 0.2 | 1.3 | 14.6 | 0.156 | 0.041
GRPO + LEAD | 0.4 | 1.5 | 7.7 | 0.355 | 0.047
", + "bbox": [ + 173, + 200, + 823, + 507 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/bb21394a055c4e6334099714496cb564eb5a23dcb6c328d6057f393534a6b2f9.jpg", + "table_caption": [ + "Table 4: Consolidated evaluation results on AIME25, stratified by problem difficulty. Avg. Answers refers to the number of outputs that have completed within the 8k token budget and produce some, where as Avg. Correct refers to the correct answers. Precision is Avg. Correct/ Avg. Answers." + ], + "table_footnote": [], + "table_body": "
Aspect | GRPO+L | Deepseek-7B
Structure & flow | “Step 1: enumerate all possible triples ... Step 2: compute the multinomial coefficient ... Step 3: sum and mod.” | “Okay ... let me parse this step by step ... but wait, hold on ... let me verify the triples again ...”
Redundancy | “...hence, all possible triples: (6, 2, 1), (5, 3, 1), (4, 3, 2).” | “So, the possible triples ... So, three triples in total ... Wait, hold on, let me check if there are more ... So, total three triples.”
Conciseness of language | “Total N = 2016. Therefore, the remainder is 16.” | “Wait, hold on a second. ... Maybe I can think of all possible partitions ... No, I think the only possible triples are the three we found.”
Logical signposting | “Case 1: s = 1 ... Case 2: s = 2 ... Case 3: s = 3 (no solutions).” | “Case 1: S = 1 ... Subcase 1a ... Subcase 1b ... (digression) ... Case 3: S = 3 ... no solutions ... (returns to earlier cases).”
Error-checking | “Only three possible triples, so the computation is complete.” | “Wait, hold on a second. Is that all? ... let me verify the triples again ... maybe there are other triples?”
Length | Entire solution ≈ 200 words. | Entire solution ≈ 370 words (many repeated sentences such as “So, I think 16 is the answer”).
", + "bbox": [ + 122, + 573, + 873, + 875 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Table 5: Qualitative comparison of the shortest correct rollouts from GRPO+L and Deepseek-7B for AIME 25 I, Problem 3. Italicized text in the Deepseek-7B column represents meta-commentary or self-correction loops.", + "bbox": [ + 112, + 885, + 882, + 915 + ], + "page_idx": 12 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_model.json b/data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_model.json new file mode 100644 index 0000000000000000000000000000000000000000..ddf53919ef95f8378f73187ca5a0647dc293a458 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_model.json @@ -0,0 +1,1909 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.291, + 0.064, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.09696v2 [cs.CL] 19 Sep 2025" + }, + { + "type": "title", + "bbox": [ + 0.116, + 0.09, + 0.885, + 0.131 + ], + "angle": 0, + "content": "GRPO-LEAD: A Difficulty-Aware Reinforcement Learning Approach for Concise Mathematical Reasoning in Language Models" + }, + { + "type": "text", + "bbox": [ + 0.224, + 0.159, + 0.438, + 0.209 + ], + "angle": 0, + "content": "Jixiao Zhang* Johns Hopkins University jzhan432@jh.edu" + }, + { + "type": "text", + "bbox": [ + 0.56, + 0.159, + 0.772, + 0.208 + ], + "angle": 0, + "content": "Chunsheng Zuo* Johns Hopkins University czuo3@jh.edu" + }, + { + "type": "title", + "bbox": [ + 0.261, + 0.261, + 0.341, + 0.277 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.143, + 0.284, + 0.461, + 0.596 + ], + "angle": 0, + "content": "Group Relative Policy Optimization (GRPO), which is widely adopted by R1-like reasoning models, has advanced mathematical reasoning. Nevertheless, GRPO faces challenges in reward sparsity, verosity, and inadequate focus on problem difficulty. We propose GRPO-LEAD, enhancing GRPO with: (1) length-regularized rewards to encourage conciseness while maintaining accuracy; (2) explicit penalties for incorrect solutions to improve model precision; and (3) difficulty-aware advantage reweighting for robust generalization on challenging problems. Comprehensive evaluations demonstrate that GRPO-LEAD significantly improves reasoning accuracy, conciseness, and efficiency. Our approach achieves state-of-the-art performance for 14B-scale models, underscoring the synergy of our methods with appropriate model scale and high-quality data. Our source code, generated dataset, and models are available at https://github.com/aeroplanepaper/GRPO-LEAD." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.604, + 0.262, + 0.619 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.628, + 0.49, + 0.902 + ], + "angle": 0, + "content": "Recently, R1-like reasoning models have attracted significant attention due to their impressive performance in solving challenging mathematical reasoning tasks through extensive chains of thought (Luo et al., 2025b; Wen et al., 2025). According to the technical report introducing R1 (Guo et al., 2025), reinforcement learning (RL) fine-tuning plays a pivotal role in enabling this reasoning capability. 
In particular, Group Relative Policy Optimization (GRPO) (Shao et al., 2024), a novel RL approach for language models, has emerged as a promising alternative to traditional methods such as PPO (Schulman et al., 2017) and DPO (Rafailov et al., 2023), primarily due to its efficiency and intrinsic compatibility with language model training. Researchers across various domains have successfully employed GRPO (Li et al., 2025; Liu et al.," + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.261, + 0.885, + 0.294 + ], + "angle": 0, + "content": "2025a; Luo et al., 2025a; Dai et al., 2025), achieving impressive outcomes." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.299, + 0.885, + 0.637 + ], + "angle": 0, + "content": "Despite its strengths, existing GRPO implementations encounter significant limitations. A primary issue is reward sparsity stemming from binary, rule-based accuracy metrics; when responses within problem groups exhibit uniform correctness or incorrectness, the resulting uniform reward signals offer minimal differentiation, weakening learning gradients and hampering convergence. Moreover, such uniform signals inadequately promote concise reasoning, leading to unnecessarily verbose outputs and inefficiencies during training and inference. Additionally, the current reward formulation lacks explicit penalties for incorrect answers (Hu et al., 2025a; Luo et al., 2025b; Chu et al., 2025), inadvertently encouraging models to guess rather than engage in rigorous reasoning, thereby compromising precision. Furthermore, rewards are applied uniformly across problems regardless of their intrinsic difficulty, causing models to excessively optimize simpler tasks while neglecting more challenging problems that require deeper reasoning." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.643, + 0.886, + 0.821 + ], + "angle": 0, + "content": "Furthermore, computational efficiency also emerges as a critical practical concern, as reinforcement learning fine-tuning typically demands substantial resources, limiting accessibility, experimentation speed, and scalability, especially in low-resource environments. The current GRPO formulation is insufficient for encouraging concise and precise reasoning. Consequently, reducing computational requirements during both training and inference is essential for enabling broader applicability and effective real-world deployment." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.825, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Motivated by these limitations, this work introduces GRPO-LEAD, a suite of targeted modifications explicitly designed to enhance GRPO's effectiveness for mathematical reasoning tasks. The overall framework is illustrated in Figure 1. Our key contributions include:" + }, + { + "type": "page_footnote", + "bbox": [ + 0.137, + 0.907, + 0.263, + 0.922 + ], + "angle": 0, + "content": "*Equal contribution." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.139, + 0.089, + 0.861, + 0.327 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.343, + 0.883, + 0.403 + ], + "angle": 0, + "content": "Figure 1: The GRPO-LEAD framework assigns length-regularized positive rewards to correct answers and explicit penalties to incorrect ones. A difficulty-based weight \\( w \\) used for advantage reweighting is determined from the empirical correctness of responses for each question. 
This weight then scales the advantages derived from each question, prioritizing harder questions over easier ones during the policy update to foster robust reasoning." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.426, + 0.489, + 0.49 + ], + "angle": 0, + "content": "- We introduce a length-regularized reward with an explicit penalty for incorrect solutions to encourage solution conciseness while maintaining accuracy." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.492, + 0.489, + 0.541 + ], + "angle": 0, + "content": "- We apply difficulty-aware advantage reweighting to focus learning on more challenging problems, fostering robust generalization." + }, + { + "type": "text", + "bbox": [ + 0.136, + 0.542, + 0.49, + 0.622 + ], + "angle": 0, + "content": "- Our comprehensive evaluations demonstrate GRPO-LEAD significantly improves reasoning accuracy and conciseness, achieving state-of-the-art performance in mathematical reasoning for 14B-scale models." + }, + { + "type": "list", + "bbox": [ + 0.136, + 0.426, + 0.49, + 0.622 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.633, + 0.271, + 0.648 + ], + "angle": 0, + "content": "2 Related Work" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.658, + 0.447, + 0.673 + ], + "angle": 0, + "content": "2.1 Group Relative Policy Optimization" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.678, + 0.489, + 0.822 + ], + "angle": 0, + "content": "Group Relative Policy Optimization (GRPO) is a recently proposed algorithm designed specifically for fine-tuning language models with group-level normalization of rewards (Guo et al., 2025). GRPO modifies the standard policy gradient objective by introducing relative advantages within sets of responses corresponding to the same query, stabilizing updates and promoting consistent learning signals. Formally, GRPO defines the objective as:" + }, + { + "type": "equation", + "bbox": [ + 0.124, + 0.829, + 0.487, + 0.92 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathcal {L} _ {\\mathrm {G R P O}} (\\theta) = \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\sum_ {t = 1} ^ {| o _ {i} |} \\left[ \\min \\left(r _ {i, t} (\\theta) \\hat {A} _ {i, t}, \\right. \\right. \\tag {1} \\\\ \\left. \\operatorname {c l i p} \\left(r _ {i, t} (\\theta), 1 - \\epsilon , 1 + \\epsilon) \\hat {A} _ {i, t}\\right) \\right] \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.426, + 0.868, + 0.442 + ], + "angle": 0, + "content": "where the importance sampling ratio is given by" + }, + { + "type": "equation", + "bbox": [ + 0.584, + 0.449, + 0.883, + 0.484 + ], + "angle": 0, + "content": "\\[\nr _ {i, t} (\\theta) = \\frac {\\pi_ {\\theta} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}. \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.489, + 0.884, + 0.553 + ], + "angle": 0, + "content": "Here, \\(G\\) denotes the number of groups (e.g., different queries), \\(\\hat{A}_{i,t}\\) is the normalized advantage within group \\(i\\), and \\(\\epsilon\\) defines the clipping range for conservative updates." 
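To make Eqs. (1) and (2) concrete, the sketch below computes the group-relative advantage and the clipped GRPO surrogate in plain Python/NumPy. It is an illustration rather than the authors' or VERL's implementation; the function names, the assumed input shapes, and the clip value of 0.2 are our own choices.

```python
import numpy as np

def group_advantages(rewards, eps=1e-6):
    # Rewards of all rollouts sampled for one query; returns the group-normalized
    # advantage that is shared by every token of the corresponding rollout.
    rewards = np.asarray(rewards, dtype=float)
    return (rewards - rewards.mean()) / (rewards.std() + eps)

def grpo_surrogate(logp_new, logp_old, advantages, clip_eps=0.2):
    # logp_new, logp_old: per-token log-probabilities under the current and the old
    # policy, shape (num_rollouts, seq_len); advantages: shape (num_rollouts,).
    ratio = np.exp(np.asarray(logp_new) - np.asarray(logp_old))  # r_{i,t} of Eq. (2)
    adv = np.asarray(advantages)[:, None]                        # broadcast over tokens
    clipped = np.clip(ratio, 1.0 - clip_eps, 1.0 + clip_eps) * adv
    per_token = np.minimum(ratio * adv, clipped)                 # inner term of Eq. (1)
    return per_token.mean(axis=1).mean()                         # mean over tokens, then rollouts
```

Averaging per rollout before averaging over the group mirrors the 1/|o_i| and 1/G factors in Eq. (1); a padded batch would additionally need a token mask, which is omitted here for brevity.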
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.563, + 0.681, + 0.579 + ], + "angle": 0, + "content": "2.2 Length Reward" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.584, + 0.884, + 0.776 + ], + "angle": 0, + "content": "A prevalent issue in reinforcement learning-based fine-tuning of language models is reward hacking (Everitt et al., 2017; Gao et al., 2023; Weng, 2024). In GRPO, when the model is trained with a large fixed budget, it can exploit this budget by producing an excessive number of extra reasoning and verification steps to ensure the correctness of the answer and therefore reach a higher reward. This phenomenon leads to unnecessarily verbose responses that lack conciseness and hinder interpretability, resulting in inefficiency in reasoning and reducing the model's practicality." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.777, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Existing efforts to mitigate this problem typically involve incentivizing shorter answers to encourage more succinct reasoning processes. For example, Kimi proposed an individual min-max normalized length reward based on the lengths of generated responses (Team et al., 2025). Yeo et al. introduced a cosine length reward function with fixed maximum and minimum thresholds to manage response lengths (Yeo et al., 2025). Aggarwal et al." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.486, + 0.132 + ], + "angle": 0, + "content": "utilized a target \"golden length\" to directly reward or penalize responses based on their deviation from an ideal length (Aggarwal and Welleck, 2025)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.134, + 0.487, + 0.277 + ], + "angle": 0, + "content": "However, these existing methods depend heavily on static or predefined length heuristics, limiting their effectiveness across diverse questions of varying complexity. In contrast, our proposed length-dependent accuracy reward addresses these limitations by dynamically calibrating rewards according to each group's relative response length and rollout accuracy, promoting concise yet difficulty-aware reasoning processes." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.291, + 0.218, + 0.306 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.317, + 0.49, + 0.591 + ], + "angle": 0, + "content": "To systematically address the limitations identified in existing implementations of Group Relative Policy Optimization (GRPO), we propose a suite of novel modifications collectively termed GRPO-LEAD (GRPO with Length-dependent rewards, Explicit penalties, and Advantage reweighting for Difficulty). Our proposed method enhances the original GRPO framework by introducing three core innovations: 1) a length-dependent accuracy reward to foster concise solutions, 2) an explicit penalty mechanism to mitigate low precision rate caused by length reward, and 3) a difficulty-aware advantage reweighting strategy that amplifies learning signals for challenging problems. Additionally, we examine how base model scale and supervised fine-tuning (SFT) impact the effectiveness of reinforcement learning (RL) fine-tuning." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.603, + 0.453, + 0.619 + ], + "angle": 0, + "content": "3.1 Length-Dependent Accuracy Reward" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.624, + 0.489, + 0.752 + ], + "angle": 0, + "content": "The core idea is to reward correct completions not uniformly but in proportion to their relative conciseness. Given a question \\( q \\) and a set of model-generated responses \\( \\{o_i\\} \\), we first isolate the subset of correct responses and compute the mean \\( \\mu \\) and standard deviation \\( \\sigma \\) of their token lengths. For a correct response \\( o \\) with length \\( |o| \\), we define its standardized length deviation as:" + }, + { + "type": "equation", + "bbox": [ + 0.251, + 0.765, + 0.488, + 0.797 + ], + "angle": 0, + "content": "\\[\nz = \\frac {| o | - \\mu}{\\sigma + \\epsilon}, \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.807, + 0.487, + 0.855 + ], + "angle": 0, + "content": "where \\(\\epsilon > 0\\) is a small constant added for numerical stability. The final reward is modulated using an exponential decay function:" + }, + { + "type": "equation", + "bbox": [ + 0.119, + 0.867, + 0.488, + 0.922 + ], + "angle": 0, + "content": "\\[\nR _ {\\text {a c c u r a c y}} (o | q) = \\left\\{ \\begin{array}{l l} \\exp (- \\alpha z), & \\text {i f o i s c o r r e c t ,} \\\\ 0, & \\text {i f o i s i n c o r r e c t .} \\end{array} \\right. \\tag {4}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.085, + 0.882, + 0.116 + ], + "angle": 0, + "content": "where \\(\\alpha > 0\\) is a tunable hyperparameter controlling the strength of length penalization." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.118, + 0.884, + 0.23 + ], + "angle": 0, + "content": "This formulation ensures that overly long correct responses are systematically penalized, while relatively concise ones are amplified. Unlike static or absolute length constraints, our approach leverages standardized deviation, allowing for dynamic adaptation to the distributional properties of each question." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.244, + 0.878, + 0.276 + ], + "angle": 0, + "content": "3.2 Explicit Penalty for Incorrect Answers to Enhance True Accuracy" + }, + { + "type": "text", + "bbox": [ + 0.507, + 0.281, + 0.884, + 0.489 + ], + "angle": 0, + "content": "Existing methods often prioritize maximizing \\( \\text{pass} @ 1 \\) — the success rate on the first attempt—typically within restricted response lengths. However, this focus can inadvertently degrade overall model accuracy. The fundamental issue appears to stem from the use of a binary accuracy reward, rather than length-based regularization: under pressure to generate responses within a limited length, a model is encouraged to provide an answer, even if it's a guess, rather than no answer at all. Such guesses can achieve a non-zero reward and inflate \\( \\text{pass} @ 1 \\), but they do so at the cost of overall precision by rewarding less rigorous reasoning." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.49, + 0.884, + 0.57 + ], + "angle": 0, + "content": "To counteract this tendency and foster a more robust distinction between correct and incorrect outputs, we introduce a revised reward structure that explicitly penalizes incorrect responses. 
This new reward function is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.51, + 0.579, + 0.883, + 0.634 + ], + "angle": 0, + "content": "\\[\nR _ {\\text {a c c u r a c y}} (o \\mid q) = \\left\\{ \\begin{array}{l l} \\exp (- \\alpha z), & \\text {i f o i s c o r r e c t ,} \\\\ - 1, & \\text {i f o i s i n c o r r e c t ,} \\end{array} \\right. \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.635, + 0.882, + 0.714 + ], + "angle": 0, + "content": "where \\(o\\) is the output, \\(q\\) is the question, \\(z\\) represents the standardized length deviation of a correct response, and \\(\\alpha > 0\\) is a hyperparameter controlling the strength of the length penalization for correct answers, consistent with prior definitions." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.715, + 0.882, + 0.761 + ], + "angle": 0, + "content": "The expected reward for a response, given its probability of correctness \\( P(\\text{correct}) \\), under this formulation is:" + }, + { + "type": "equation", + "bbox": [ + 0.53, + 0.775, + 0.882, + 0.813 + ], + "angle": 0, + "content": "\\[\n\\begin{array}{l} \\mathbb {E} \\left[ R _ {\\text {a c c u r a c y}} (o \\mid q) \\right] = P (\\text {c o r r e c t}) \\cdot \\exp (- \\alpha z) \\\\ - (1 - P (\\text {c o r r e c t})) \\tag {4} \\\\ \\end{array}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.825, + 0.884, + 0.922 + ], + "angle": 0, + "content": "To intuitively grasp the impact of this reward function, let us consider a simplified scenario where the length penalty for correct answers is negligible (i.e., \\(\\exp (-\\alpha z)\\approx 1\\)). In practice, the average reward for correct answers often normalizes close to this value. Under this assumption, the expected reward" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.114, + 0.085, + 0.216, + 0.1 + ], + "angle": 0, + "content": "simplifies to:" + }, + { + "type": "equation", + "bbox": [ + 0.207, + 0.108, + 0.488, + 0.126 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} [ R ] \\approx 2 P (\\text {c o r r e c t}) - 1 \\tag {5}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.132, + 0.49, + 0.293 + ], + "angle": 0, + "content": "This approximation reveals a crucial characteristic: the expected reward becomes positive only when \\( P(\\mathrm{correct}) > 0.5 \\). This threshold acts as a principled deterrent against speculative guessing, compelling the model to internalize a more stringent decision boundary for correctness. Our empirical results confirm that this approach significantly improves both \\( pass@1 \\) and overall precision, encouraging the model to favor accuracy over mere completion." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.303, + 0.376, + 0.335 + ], + "angle": 0, + "content": "3.3 Advantage Reweighting for Difficulty-Aware Training" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.339, + 0.49, + 0.516 + ], + "angle": 0, + "content": "While length reward and advantage reweighting can enhance precision and mitigate morbidity, uniformly applying rewards across all questions, irrespective of their intrinsic difficulty, may implicitly bias the model. It might learn to excessively optimize performance on simpler tasks—where correct and concise responses are more readily achieved—while neglecting more complex questions that demand deeper reasoning. Consequently, the performance on challenging problems can degrade." 
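(As a concrete reading of Eqs. (3) to (5) above, the per-group reward could be computed as in the following sketch. The function name, the choice alpha = 0.05, which is the value reported later in Section 4.2, and the behaviour when a group contains no correct rollout are our assumptions rather than the authors' code.)

```python
import numpy as np

def lead_reward(lengths, correct, alpha=0.05, eps=1e-6):
    # lengths: token lengths of the rollouts for one question; correct: booleans.
    # Correct rollouts receive exp(-alpha * z), where z standardizes the length
    # against the correct rollouts only (Eqs. 3-4); incorrect rollouts receive -1 (Eq. 5).
    lengths = np.asarray(lengths, dtype=float)
    correct = np.asarray(correct, dtype=bool)
    rewards = np.full(lengths.shape, -1.0)
    if correct.any():
        mu = lengths[correct].mean()
        sigma = lengths[correct].std()
        z = (lengths[correct] - mu) / (sigma + eps)
        rewards[correct] = np.exp(-alpha * z)
    return rewards
```

In this sketch, a group with no correct rollout returns -1 for every response, so the group-normalized advantages are all zero and the update for that question vanishes.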
+ }, + { + "type": "text", + "bbox": [ + 0.113, + 0.517, + 0.49, + 0.612 + ], + "angle": 0, + "content": "Therefore, we introduce a difficulty-aware advantage reweighting strategy, which dynamically adjusts the magnitude of policy updates based on an estimate of problem difficulty. The intuition is to amplify learning signals for harder tasks, re-anchoring the model towards harder tasks." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.613, + 0.49, + 0.677 + ], + "angle": 0, + "content": "Formally, we first quantify problem difficulty. For a given question \\( q \\) and its associated set of sampled responses \\( \\{o_i\\} \\), we define the group's empirical correctness ratio as:" + }, + { + "type": "equation", + "bbox": [ + 0.138, + 0.682, + 0.488, + 0.716 + ], + "angle": 0, + "content": "\\[\n\\rho_ {q} = \\frac {\\text {n u m b e r o f c o r r e c t r e s p o n s e s f o r} q}{\\text {t o t a l n u m b e r o f r e s p o n s e s f o r} q}. \\tag {6}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.72, + 0.489, + 0.766 + ], + "angle": 0, + "content": "This ratio, \\(\\rho_{q}\\), serves as an inverse proxy for problem difficulty: a lower \\(\\rho_{q}\\) suggests a harder question." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.769, + 0.489, + 0.831 + ], + "angle": 0, + "content": "Next, we introduce a logistic reweighting factor dependent on this ratio to modulate the advantage estimates during the RL training step. The logistic function is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.163, + 0.835, + 0.488, + 0.87 + ], + "angle": 0, + "content": "\\[\nw \\left(\\rho_ {q}\\right) = A + \\frac {B - A}{1 + \\exp \\left[ k \\left(\\rho_ {q} - \\rho_ {0}\\right) \\right]}, \\tag {7}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.874, + 0.489, + 0.922 + ], + "angle": 0, + "content": "where hyperparameters \\(A, B, \\rho_0, k\\) allow precise control over the sensitivity of weighting to problem difficulty." + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.085, + 0.883, + 0.134 + ], + "angle": 0, + "content": "To apply this reweighting, we first consider the normalized advantage estimate for a response \\(o_i\\) to question \\(q\\):" + }, + { + "type": "equation", + "bbox": [ + 0.618, + 0.146, + 0.883, + 0.181 + ], + "angle": 0, + "content": "\\[\n\\tilde {A} _ {i} = \\frac {R \\left(o _ {i} | q\\right) - \\mu_ {q}}{\\sigma_ {q} + \\epsilon}, \\tag {8}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.193, + 0.885, + 0.272 + ], + "angle": 0, + "content": "where \\(\\mu_q\\) and \\(\\sigma_q\\) are the mean and standard deviation of rewards \\(R(o_i|q)\\) for responses to question \\(q\\), and \\(\\epsilon\\) is a small constant for numerical stability. We then define the difficulty-aware advantage, \\(A_i'\\), as:" + }, + { + "type": "equation", + "bbox": [ + 0.561, + 0.284, + 0.883, + 0.326 + ], + "angle": 0, + "content": "\\[\nA _ {i} ^ {\\prime} = \\tilde {A} _ {i} \\cdot \\left\\{ \\begin{array}{l l} w \\left(\\rho_ {q}\\right), & \\text {i f} \\tilde {A} _ {i} > 0 \\\\ w \\left(1 - \\rho_ {q}\\right), & \\text {i f} \\tilde {A} _ {i} \\leq 0 \\end{array} \\right. \\tag {9}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.338, + 0.884, + 0.468 + ], + "angle": 0, + "content": "This formulation ensures that for difficult problems (low \\(\\rho_{q}\\)), correct responses (which are rare and thus highly valuable) receive substantially larger updates due to the increased weighting \\(w(\\rho_q)\\). 
Conversely, incorrect responses on easier problems (high \\(\\rho_{q}\\)) are penalized more strongly, sharpening the decision boundary for problems where high performance should be expected." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.48, + 0.882, + 0.513 + ], + "angle": 0, + "content": "3.4 Impact of Data Quality on Reinforcement Learning Effectiveness" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.519, + 0.884, + 0.743 + ], + "angle": 0, + "content": "To further enhance model capabilities, we first performed supervised fine-tuning (SFT) on a specialized dataset of 13k math reasoning problems sourced from DeepScaler (Luo et al., 2025b) (including historical AMC, AIME, and OmniMath problems) with solutions generated by QwQ32B (Team, 2025). Although this SFT model initially showed signs of overfitting, subsequent application of our proposed RL strategies rapidly mitigated these issues. This SFT+RL approach yielded faster convergence and significantly improved pass@1 accuracy and overall precision compared to applying RL directly to the original base model." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.745, + 0.885, + 0.922 + ], + "angle": 0, + "content": "Our findings also highlight the critical role of data quality and curriculum strategies in RL. We established a robust initial policy by applying RL to a subset of challenging problems from the DeepScaler dataset. This policy was then further refined using a curriculum composed of the most challenging problems identified from this first RL stage and supplemented by high-difficulty examples from the Light-R1 dataset (Wen et al., 2025). This two-stage curriculum markedly enhanced the model's ability to continuously improve on complex tasks." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.117, + 0.085, + 0.487, + 0.244 + ], + "angle": 0, + "content": "Finally, we addressed a persistent formatting issue of repetitive n-gram patterns, likely stemming from an absence of clear end-of-sequence (EOS) signals during SFT. By temporarily removing length-dependent rewards and introducing an explicit negative reward \\((-1.5)\\) for such repeated ngrams, we achieved further improvements in precision and pass@1 metrics. This intervention demonstrates the effectiveness of targeted reward modifications for mitigating specific output anomalies." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.246, + 0.487, + 0.374 + ], + "angle": 0, + "content": "In summary, our experiments affirm that initial model capacity, curated data curricula for RL, and targeted reward engineering are pivotal for optimizing fine-tuning outcomes. These elements collectively inform a systematic approach for enhancing language models' ability to produce concise, accurate, and well-structured responses across tasks of varying complexity." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.386, + 0.32, + 0.402 + ], + "angle": 0, + "content": "4 Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.41, + 0.487, + 0.537 + ], + "angle": 0, + "content": "We evaluate GRPO-LEAD, integrating length-dependent accuracy rewards, explicit penalties for incorrect solutions, and difficulty-aware advantage reweighting, on DEEPSEEK-R1 DISTILLED variants (Guo et al., 2025; Yang et al., 2024). Our experiments cover two model scales, 7B and 14B parameters. All GRPO training is conducted using the VERL framework.(Sheng et al., 2024)." 
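Looking back at Eqs. (6) to (9), the difficulty-aware reweighting can be sketched as follows. The default hyperparameters are the values reported in Section 4.2 (A = 0.4, B = 1.5, rho_0 = 0.75, k = 10); the function names and the way the correctness ratio is passed in are illustrative assumptions.

```python
import numpy as np

def difficulty_weight(rho, A=0.4, B=1.5, rho0=0.75, k=10.0):
    # Logistic weight of Eq. (7); rho is the group's empirical correctness ratio (Eq. 6).
    return A + (B - A) / (1.0 + np.exp(k * (rho - rho0)))

def reweighted_advantages(rewards, num_correct, num_total, eps=1e-6):
    # Normalize rewards within the group (Eq. 8), then scale positive advantages by
    # w(rho) and non-positive ones by w(1 - rho), as in Eq. (9).
    rewards = np.asarray(rewards, dtype=float)
    rho = num_correct / num_total
    adv = (rewards - rewards.mean()) / (rewards.std() + eps)
    return np.where(adv > 0,
                    adv * difficulty_weight(rho),
                    adv * difficulty_weight(1.0 - rho))
```

With these values the weight stays close to A = 0.4 for questions the policy already solves most of the time and rises toward B = 1.5 once the correctness ratio drops below the 0.75 threshold, matching the behaviour described in Section 4.2.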
+ }, + { + "type": "title", + "bbox": [ + 0.117, + 0.549, + 0.334, + 0.564 + ], + "angle": 0, + "content": "4.1 Datasets and Filtering" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.57, + 0.487, + 0.649 + ], + "angle": 0, + "content": "Our primary training data is sourced from the DEEPSCALER dataset (Luo et al., 2025b). We filter out problems with difficulty ratings below 2.5, resulting in approximately 9,000 questions for fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.65, + 0.487, + 0.777 + ], + "angle": 0, + "content": "For stage 2 of our 14B model experiments, we further refine the dataset by selecting problems where the model's stage-1 rollout accuracy is no greater than \\(75\\%\\), yielding around 2,283 questions. Additionally, we incorporate challenging problems with numeric answers from the stage-2 dataset of Light-R1 (Wen et al., 2025), resulting in 3,524 questions in total." + }, + { + "type": "title", + "bbox": [ + 0.117, + 0.789, + 0.3, + 0.804 + ], + "angle": 0, + "content": "4.2 Hyperparameters" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.808, + 0.487, + 0.92 + ], + "angle": 0, + "content": "We train with a learning rate of \\(1 \\times 10^{-6}\\), batch size 32, and group size 8—generating 8 rollouts per question for GRPO reward computation. The KL penalty term is removed, as it was found to suppress exploration in our experiments, which is also suggested in similar works (Liu et al., 2025b; Hu et al., 2025b)." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.085, + 0.882, + 0.261 + ], + "angle": 0, + "content": "For the length-dependent accuracy reward, we set \\(\\alpha = 0.05\\), providing a moderate decay that encourages conciseness without penalizing slight morbidity. For difficulty-aware advantage reweighting, we use \\(A = 0.4\\), \\(B = 1.5\\), \\(\\rho_0 = 0.75\\), and \\(k = 10\\). This configuration ensures reweighting is minimal on easy problems but increases sharply near the \\(75\\%\\) correctness threshold. The steep slope (\\(k = 10\\)) enables strong emphasis on high-difficulty examples, guiding the model to allocate learning more effectively." + }, + { + "type": "title", + "bbox": [ + 0.514, + 0.275, + 0.866, + 0.291 + ], + "angle": 0, + "content": "4.3 Model Variants and Fine-Tuning Stages" + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.297, + 0.882, + 0.522 + ], + "angle": 0, + "content": "7B Model Experiments Starting from the DeepSeek-R1 Distilled 7B Qwen-Math checkpoint, we first apply standard GRPO on the 9k questions, producing a baseline. Then, we train three more models from the DeepSeek-R1 Distilled 7B QwenMath checkpoint, adding one more of the following components subsequently: (i) Length Reward only, (ii) Length Reward + Advantage Reweighting, (iii) Length Reward + Advantage Reweighting + Explicit Penalty. We train for approximately 200 steps and select the top-performing checkpoints based on validation results. At test time, we limit the generation length to 8k for all 7B models, matching the training length limit." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.534, + 0.882, + 0.725 + ], + "angle": 0, + "content": "14B Model Experiments We extend the above procedure to the DeepSeek-R1 Distilled 14B Qwen checkpoint across multiple stages. In Stage 1, we train for 100 steps using all GRPO-LEAD components on the filtered 9k-question dataset. 
To enhance the model's base capability, we first fine-tune the model on a curated set of 13k math problems with supervised fine-tuning (SFT), then conduct the RL phase. This SFT stage significantly improves the model's reasoning quality, even though it tends to increase the output length and caused some format errors." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.728, + 0.881, + 0.808 + ], + "angle": 0, + "content": "The SFT data consists of all problems in the DEEPSCALER dataset with difficulty greater than 1. To construct high-quality reasoning traces for SFT, we use the QWQ-32B model (Team, 2025) to generate step-by-step solutions." + }, + { + "type": "text", + "bbox": [ + 0.514, + 0.81, + 0.882, + 0.92 + ], + "angle": 0, + "content": "After observing that some questions remain low correctness, we further fine-tune for Stage 2 to focus on those underperformed problems. We also address the repetitive output patterns by removing the length penalty and introducing a negative reward \\((-1.5)\\) for repeated \\(n\\)-grams. We continue training for 240 more steps (100 steps with initial settings" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.113, + 0.085, + 0.49, + 0.182 + ], + "angle": 0, + "content": "and 140 more steps with repetition penalty), yielding the final model checkpoint. At test time, we limit the generation length to 14k for all 14B models, in accordance with our training settings and also to better compare the models' performance in a low-budget scenario." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.192, + 0.43, + 0.208 + ], + "angle": 0, + "content": "4.4 Baselines and Evaluation Protocol" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.213, + 0.489, + 0.356 + ], + "angle": 0, + "content": "We compare our models with both DEEPSEEK-R1 distilled-14B-Qwen (Guo et al., 2025) (the distilled Qwen model without GRPO-LEAD) and LIGHT-R1-14B-DS (Wen et al., 2025), which has the same base model as ours and was first finetuned with 3k hard math problems with SFT, and then fine-tuned with a cosine-based length reward (Yeo et al., 2025) on their selected math problems for three epochs using GRPO." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.358, + 0.49, + 0.487 + ], + "angle": 0, + "content": "We primarily report three metrics: (1) Cons@32, accuracy through majority voting for 32 samplings; (2) Pass@1, the probability that the top-1 sample is correct under a chosen decoding strategy; (3) Average Length \\((\\mathrm{Len}_{\\mathrm{avg}})\\), measuring morbidity. Unless otherwise specified, we decode with temperature 0.6 and sample 32 solutions per question, then compute Cons@32 and Pass@1 over these samples." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.498, + 0.214, + 0.513 + ], + "angle": 0, + "content": "5 Results" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.524, + 0.49, + 0.684 + ], + "angle": 0, + "content": "In this section, we present a comprehensive evaluation of the proposed GRPO-LEAD framework on two mathematical benchmarks: AIME24 and AIME25. Our analysis is structured as follows: we first examine training dynamics to illustrate how GRPO-LEAD accelerates convergence; next, we perform an ablation study to assess the incremental benefits of each component; and finally, we compare against state-of-the-art baselines for 14B-scale language models." 
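As a small illustration of the evaluation protocol in Section 4.4, Cons@32 and Pass@1 can be estimated from the 32 sampled solutions per question as sketched below. Answer extraction and mathematical-equivalence checking are assumed to have happened already; the helper names are ours.

```python
from collections import Counter

def cons_at_k(answers, reference):
    # Majority vote over the k sampled final answers (Cons@32 when k == 32).
    majority, _ = Counter(answers).most_common(1)[0]
    return float(majority == reference)

def pass_at_1(answers, reference):
    # Estimate of Pass@1: the fraction of samples whose final answer is correct.
    return sum(a == reference for a in answers) / len(answers)

samples = ["16", "16", "14", "16"]   # toy example with 4 samples
print(cons_at_k(samples, "16"))      # 1.0
print(pass_at_1(samples, "16"))      # 0.75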
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.695, + 0.312, + 0.71 + ], + "angle": 0, + "content": "5.1 Training Dynamics" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.716, + 0.49, + 0.877 + ], + "angle": 0, + "content": "Figure 2 plots the evolution of Pass@1 on a validation split over training steps for three configurations of the 7B model: (i) baseline GRPO, (ii) GRPO with length reward, and (iii) GRPO with both length reward and advantage reweighting. We observe two clear trends. First, adding a length-dependent reward not only yields higher Pass@1 but also accelerates early-stage convergence, suggesting that penalizing overly verbose correct solutions provides a more informative learning signal." + }, + { + "type": "image", + "bbox": [ + 0.516, + 0.085, + 0.88, + 0.222 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.508, + 0.235, + 0.885, + 0.307 + ], + "angle": 0, + "content": "Figure 2: Validation* Pass@1 over training steps for three configurations: GRPO, GRPO+L, and GRPO+LAD. As shown by the faster convergence, length reward and advantage reweighting provide a richer reward signal signal than the original setup." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.333, + 0.884, + 0.429 + ], + "angle": 0, + "content": "Second, incorporating advantage reweighting (to amplify updates on harder questions) further steepens the trajectory, indicating that reweighting advantage estimates according to problem difficulty helps the model refine reasoning on challenging prompts more efficiently." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.43, + 0.884, + 0.526 + ], + "angle": 0, + "content": "Overall, these dynamics confirm that GRPO-LEAD components—particularly the length reward—bolster training stability and speed. By comparison, the baseline GRPO model learns more slowly and lags behind in Pass@1 across the entire training horizon." + }, + { + "type": "title", + "bbox": [ + 0.509, + 0.538, + 0.696, + 0.553 + ], + "angle": 0, + "content": "5.2 Ablation Analysis" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.558, + 0.884, + 0.621 + ], + "angle": 0, + "content": "We next quantify the contribution of each GRPO-LEAD component through a step-by-step ablation on the 7B model. Table 1 summarizes results on AIME24 and AIME25." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.632, + 0.885, + 0.921 + ], + "angle": 0, + "content": "Length Reward Brings Conciseness to Reasoning We first incorporate the length-dependent accuracy reward into GRPO. Compared to Deepseek-7B, length reward slightly improves Pass@1 on both AIME24 by \\(1.6\\%\\) (\\(0.431 \\rightarrow 0.438\\)) and AIME25 by \\(5.4\\%\\) (\\(0.292 \\rightarrow 0.308\\)), with an additional improvement of Cons@32 by \\(14.1\\%\\) on AIME25. Notably, these improvements are accompanied by a substantial reduction of 1,715 tokens (\\(24.5\\%\\)) and 1,903 tokens (\\(26.8\\%\\)) in the average response length on the two datasets, respectively. Figure 3 further demonstrates that length reward largely enhances performance in low-budget settings over the base model, matching its peak performance with only 5/8 of the token budget on the more difficult AIME25. 
These results demonstrate that length reward, by penalizing correct but overly verbose solutions, can effectively reduce unnec" + }, + { + "type": "page_footnote", + "bbox": [ + 0.113, + 0.884, + 0.49, + 0.921 + ], + "angle": 0, + "content": "*The validation consists of 27 challenging problems from AIMO2 (Frieder et al., 2024), CMU-MATH-AIMO (Sun, 2024), and AIME24." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.191, + 0.082, + 0.805, + 0.185 + ], + "angle": 0, + "content": "
<tr><td>Ablation Setting</td><td colspan=3>AIME24</td><td colspan=3>AIME25</td></tr>
<tr><td></td><td>Cons@32</td><td>Pass@1</td><td>Lenavg</td><td>Cons@32</td><td>Pass@1</td><td>Lenavg</td></tr>
<tr><td>Deepseek-7B</td><td>0.767</td><td>0.431</td><td>6,990</td><td>0.467</td><td>0.292</td><td>7,113</td></tr>
<tr><td>GRPO + len. reward</td><td>0.767</td><td>0.438</td><td>5,275</td><td>0.533</td><td>0.308</td><td>5,210</td></tr>
<tr><td>+ adv. reweighting</td><td>0.767</td><td>0.458</td><td>5,323</td><td>0.567</td><td>0.325</td><td>5,437</td></tr>
<tr><td>+ explicit penalty</td><td>0.800</td><td>0.470</td><td>6,104</td><td>0.567</td><td>0.345</td><td>6,308</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.196, + 0.88, + 0.237 + ], + "angle": 0, + "content": "Table 1: Ablation results on AIME24 and AIME25. We report Cons@32 (accuracy through majority voting for 32 samplings), Pass@1, and the average token length (Lenavg). The best value in each column is in boldface, the second best is underlined." + }, + { + "type": "image", + "bbox": [ + 0.12, + 0.255, + 0.46, + 0.494 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.251, + 0.503, + 0.328, + 0.515 + ], + "angle": 0, + "content": "(a) AIME24" + }, + { + "type": "image", + "bbox": [ + 0.539, + 0.256, + 0.876, + 0.494 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.669, + 0.502, + 0.747, + 0.515 + ], + "angle": 0, + "content": "(b) AIME25" + }, + { + "type": "image_caption", + "bbox": [ + 0.113, + 0.526, + 0.88, + 0.569 + ], + "angle": 0, + "content": "Figure 3: Performance against inference budget for training done with different ablations of LEAD. GRPO with length reward (GRPO+L) largely enhances the performance at low budget settings compared to before training (DeepseekR1-7B)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.595, + 0.489, + 0.626 + ], + "angle": 0, + "content": "essary text without compromising overall performance." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.648, + 0.489, + 0.921 + ], + "angle": 0, + "content": "Advantage Reweighting Encourages Model to Solve More Difficult Problems Further incorporating difficulty-aware advantage reweighting (GRPO+LAD) refines performance. On AIME24, Pass@1 increases from the GRPO+L stage by \\(4.8\\%\\) \\((0.438 \\rightarrow 0.458)\\), while Cons@32 remains 0.767. For AIME25, both Pass@1 and Cons@32 improve by \\(5.5\\%\\) \\((0.308 \\rightarrow 0.325)\\) and \\(6.4\\%\\) \\((0.533 \\rightarrow 0.567)\\), respectively. As Figure 3 shows, GRPO+LAD demonstrates gains over GRPO+L in almost all budget regimes on AIME25 and for budgets exceeding 5k tokens on AIME24. These results indicate that advantage reweighting, by prioritizing challenging problems, strengthens reasoning robustness and mitigates over-reliance on simpler examples, thus validating its role in driving more reliable generalization." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.595, + 0.884, + 0.917 + ], + "angle": 0, + "content": "Explicit Penalty for Incorrect Answers Regularizes Thinking Finally, introducing an explicit penalty for incorrect solutions (GRPO+LEAD) yields the highest Pass@1 scores. On AIME24, Pass@1 and Cons@32 improve from the GRPO+LAD stage by \\(2.6\\%\\) (\\(0.458 \\to 0.470\\)) and \\(4.3\\%\\) (\\(0.767 \\to 0.800\\)), respectively. On AIME25, Pass@1 also increases by \\(6.2\\%\\) (\\(0.325 \\to 0.345\\)), as detailed in Table 1. Notably, these gains involve a modest increase in average solution length on AIME24 (from approximately 5,300 to 6,104 tokens). Figure 3 illustrates this trade-off, showing a performance sacrifice in low-budget regimes, though GRPO+LEAD still outperforms GRPO+LAD with budgets higher than 5k tokens on AIME25. These results suggest that the explicit penalty serves as a regularizer for the model to be more conservative about its reasoning. Such regularization boosts performance while requiring a slightly longer thinking process, which" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.217, + 0.082, + 0.782, + 0.18 + ], + "angle": 0, + "content": "
<tr><td>Model Name</td><td colspan=3>AIME24</td><td colspan=3>AIME25</td></tr>
<tr><td></td><td>Cons@32</td><td>Pass@1</td><td>Lenavg</td><td>Cons@32</td><td>Pass@1</td><td>Lenavg</td></tr>
<tr><td>DeepSeek-14B</td><td>0.800</td><td>0.614</td><td>9,182</td><td>0.633</td><td>0.429</td><td>10,046</td></tr>
<tr><td>Light-R1-14B-DS</td><td>0.833</td><td>0.641</td><td>9,571</td><td>0.767</td><td>0.505</td><td>10,194</td></tr>
<tr><td>LEAD-stage1</td><td>0.833</td><td>0.629</td><td>8,790</td><td>0.767</td><td>0.523</td><td>9,371</td></tr>
<tr><td>LEAD-stage2</td><td>0.867</td><td>0.650</td><td>8,267</td><td>0.767</td><td>0.539</td><td>8,668</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.189, + 0.882, + 0.22 + ], + "angle": 0, + "content": "Table 2: Comparison of model performance on AIME24 and AIME25, showing Cons@32, Pass@1, and average token length (Lenavg). The best value in each column is in boldface, the second best is underlined." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.244, + 0.486, + 0.274 + ], + "angle": 0, + "content": "nevertheless remains shorter than the Deepseek-7B baseline." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.287, + 0.49, + 0.385 + ], + "angle": 0, + "content": "Overall, these ablation results confirm that all three enhancements—length-dependent accuracy, difficulty-aware advantage reweighting, and explicit penalties—collectively reduce morbidity, strengthen mathematical skills on harder questions, and elevate precision in final predictions." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.396, + 0.377, + 0.412 + ], + "angle": 0, + "content": "5.3 Comparison with Baselines" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.418, + 0.49, + 0.531 + ], + "angle": 0, + "content": "We next evaluate GRPO-LEAD at the 14B scale and compare it against two strong baselines under a 14k-token generation budget: DeepSeek-14B and the state-of-the-art Light-R1-14B-DS. Table 2 presents results on AIME24 and AIME25, including both our intermediate model (LEAD-stage1) and our final model (LEAD-stage2)." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.541, + 0.49, + 0.735 + ], + "angle": 0, + "content": "AIME24 Performance LEAD-stage1 achieves a Cons@32 of 0.833, matching Light-R1-14B-DS and exceeding DeepSeek-14B by \\(4.1\\%\\). Its Pass@1 outperforms DeepSeek-14B by \\(2.4\\%\\) and closely approaches Light-R1-14B-DS. Crucially, LEAD-stage1 produces more concise responses than both baselines, with more than 800 tokens less on average. Building on these gains, LEAD-stage2 pushes performance further, delivering the highest Cons@32 (4% above Light-R1-14B-DS) and the best Pass@1, while reducing average solution length to 8,267 tokens." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.745, + 0.49, + 0.922 + ], + "angle": 0, + "content": "AIME25 Performance LEAD-stage1 yields a Cons@32 of 0.767, matching Light-R1-14B-DS and exceeding DeepSeek-14B by \\(21.2\\%\\). Its Pass@1 (0.523) outperforms DeepSeek-14B by \\(21.9\\%\\) and Light-R1-14B-DS by \\(3.6\\%\\). Crucially, LEAD-stage1 produces more concise responses than both baselines, with its solutions averaging 9,371 tokens. Building on these gains, LEAD-stage2 pushes performance further, delivering the highest Cons@32 (matching Light-R1-14B-DS at 0.767) and the best Pass@1 (0.539), while reducing" + }, + { + "type": "text", + "bbox": [ + 0.509, + 0.244, + 0.807, + 0.26 + ], + "angle": 0, + "content": "average solution length to 8,668 tokens." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.26, + 0.885, + 0.453 + ], + "angle": 0, + "content": "Overall, both LEAD-stage1 and LEAD-stage2 deliver substantial improvements over DeepSeek-14B and Light-R1-14B-DS, simultaneously boosting correctness and conciseness under a constrained (14k-token) budget. Remarkably, training LEAD-stage1 for just 100 steps—requiring only about 24 hours on eight H20 GPUs—already matches Light-R1-14B-DS on Cons@32 and outperforms it on AIME25 Pass@1 while producing shorter solutions, underscoring the practical efficiency of GRPO-LEAD for large-scale math problem-solving." 
+ }, + { + "type": "title", + "bbox": [ + 0.509, + 0.465, + 0.642, + 0.48 + ], + "angle": 0, + "content": "6 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.49, + 0.885, + 0.667 + ], + "angle": 0, + "content": "We introduced GRPO-LEAD, a reinforcement learning framework designed for mathematical reasoning tasks. By extending Group Relative Policy Optimization with three major components—(1) a length-dependent accuracy reward to discourage overly verbose solutions, (2) an explicit negative penalty that clarifies the boundary between correct and incorrect answers, and (3) a difficulty-aware advantage reweighting scheme to prioritize tougher problems—GRPO-LEAD addresses key challenges in structured problem-solving." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.668, + 0.885, + 0.893 + ], + "angle": 0, + "content": "Empirical evaluations on two AIME benchmarks show that GRPO-LEAD not only speeds up convergence but also strengthens the model's reasoning capability while keeping solution paths concise. Our 14B-scale experiments further confirm that GRPO-LEAD achieves state-of-the-art performance by balancing output brevity with high problem-solving accuracy. Although open questions remain—particularly in managing partial correctness and extending these techniques to broader domains—our findings suggest that reward shaping and difficulty modeling are pivotal in developing more robust and aligned language models for complex mathematical reasoning." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.117, + 0.084, + 0.25, + 0.1 + ], + "angle": 0, + "content": "7 Limitations" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.11, + 0.488, + 0.254 + ], + "angle": 0, + "content": "Although our techniques for encouraging concise solutions and difficulty-balanced learning may transfer to other domains, the gains reported here are specific to mathematical reasoning tasks. Further studies are needed to evaluate the effectiveness of GRPO-LEAD on broader question-answering or logical reasoning domains, where correctness signals and domain structures can differ substantially." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.256, + 0.488, + 0.527 + ], + "angle": 0, + "content": "Additionally, we only have access to a limited amount of compute, which prevents us from conducting more comprehensive experiments. For instance, we currently cannot provide the validation curve for the 7B model in the ablation study that adds an explicit penalty. This is due to an error in the validation code after upgrading to the newest VERL version, and we currently do not have the compute to reproduce it. A comparison with the original GRPO model is also missing, except for the curve shown in Figure 2, because the checkpoint was stored on a rented server that was automatically released as we were writing the paper. We also couldn't formally perform a hyperparameter search to showcase the rationale behind choosing the hyperparameters for our designed modifications." + } + ], + [ + { + "type": "title", + "bbox": [ + 0.116, + 0.085, + 0.214, + 0.099 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.108, + 0.487, + 0.16 + ], + "angle": 0, + "content": "Pranjal Aggarwal and Sean Welleck. 2025. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.17, + 0.487, + 0.224 + ], + "angle": 0, + "content": "Xiangxiang Chu, Hailang Huang, Xiao Zhang, Fei Wei, and Yong Wang. 2025. Gpg: A simple and strong reinforcement learning baseline for model reasoning. arXiv preprint arXiv:2504.02546." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.233, + 0.487, + 0.274 + ], + "angle": 0, + "content": "Muzhi Dai, Chenxu Yang, and Qingyi Si. 2025. S-grpo: Early exit via reinforcement learning in reasoning models. arXiv preprint arXiv:2505.07686." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.283, + 0.487, + 0.336 + ], + "angle": 0, + "content": "Tom Everitt, Victoria Krakovna, Laurent Orseau, Marcus Hutter, and Shane Legg. 2017. Reinforcement learning with a corrupted reward channel. arXiv preprint arXiv:1705.08417." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.346, + 0.487, + 0.478 + ], + "angle": 0, + "content": "Simon Frieder, Sam Bealing, Armenii Nikolaiev, Geoff C. Smith, Kevin Buzzard, Timothy Gowers, Peter J. Liu, Po-Shen Loh, Lester Mackey, Leonardo de Moura, Dan Roberts, D. Sculley, Terence Tao, David Balduzzi, Simon Coyle, Alex Gerko, Ryan Holbrook, Addison Howard, and XTX Markets. 2024. Ai mathematical olympiad - progress prize 2. https://kaggle.com/competitions/ ai-mathematical-olympiad-progress-prize-2. Kaggle." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.487, + 0.487, + 0.541 + ], + "angle": 0, + "content": "Leo Gao, John Schulman, and Jacob Hilton. 2023. Scaling laws for reward model overoptimization. In International Conference on Machine Learning, pages 10835-10866. PMLR." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.55, + 0.487, + 0.63 + ], + "angle": 0, + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, and 1 others. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.64, + 0.487, + 0.706 + ], + "angle": 0, + "content": "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xi-angyu Zhang, and Heung-Yeung Shum. 2025a. Open reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.715, + 0.487, + 0.782 + ], + "angle": 0, + "content": "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. 2025b. Open-reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.791, + 0.487, + 0.845 + ], + "angle": 0, + "content": "Xuying Li, Zhuo Li, Yuji Kosuga, and Victor Bian. 2025. Optimizing safe and aligned language generation: A multi-objective grpo approach. arXiv preprint arXiv:2503.21819." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.854, + 0.487, + 0.921 + ], + "angle": 0, + "content": "Jie Liu, Gongye Liu, Jiajun Liang, Yangguang Li, Jiaheng Liu, Xintao Wang, Pengfei Wan, Di Zhang, and Wanli Ouyang. 2025a. Flow-grpo: Training flow matching models via online rl. arXiv preprint arXiv:2505.05470." 
+ }, + { + "type": "list", + "bbox": [ + 0.117, + 0.108, + 0.487, + 0.921 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.511, + 0.086, + 0.883, + 0.14 + ], + "angle": 0, + "content": "Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. 2025b. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.152, + 0.883, + 0.232 + ], + "angle": 0, + "content": "Michael Luo, Sijun Tan, Roy Huang, Ameen Patel, Alpay Ariyak, Qingyang Wu, Xiaoxiang Shi, Rachel Xin, Colin Cai, Maurice Weber, Ce Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. 2025a. Deepcoder: A fully open-source 14b coder at o3-mini level. . Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.245, + 0.883, + 0.324 + ], + "angle": 0, + "content": "Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. 2025b. Deepscaler: Surpassing o1-preview with a 1.5b model by scaling rl. . Notion Blog." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.337, + 0.883, + 0.415 + ], + "angle": 0, + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.43, + 0.883, + 0.483 + ], + "angle": 0, + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.496, + 0.883, + 0.575 + ], + "angle": 0, + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, and 1 others. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.588, + 0.883, + 0.654 + ], + "angle": 0, + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. 2024. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.667, + 0.876, + 0.682 + ], + "angle": 0, + "content": "Zhiqing Sun. 2024. Aimo-cmu/math/cmu/math-aimo." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.695, + 0.883, + 0.761 + ], + "angle": 0, + "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, and 1 others. 2025. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.774, + 0.883, + 0.801 + ], + "angle": 0, + "content": "Qwen Team. 2025. Qwq-32b: Embracing the power of reinforcement learning." + }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.814, + 0.883, + 0.881 + ], + "angle": 0, + "content": "Liang Wen, Yunke Cai, Fenrui Xiao, Xin He, Qi An, Zhenyu Duan, Yimin Du, Junchen Liu, Lifu Tang, Xiaowei Lv, and 1 others. 2025. Light-r1: Curriculum sft, dpo and r1 for long cot from scratch and beyond. arXiv preprint arXiv:2503.10460." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.512, + 0.894, + 0.883, + 0.921 + ], + "angle": 0, + "content": "Lilian Weng. 2024. Reward hacking in reinforcement learning. _lianweng.github.io_." + }, + { + "type": "list", + "bbox": [ + 0.511, + 0.086, + 0.883, + 0.921 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.166 + ], + "angle": 0, + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, and 1 others. 2024. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.175, + 0.49, + 0.228 + ], + "angle": 0, + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373." + }, + { + "type": "list", + "bbox": [ + 0.117, + 0.086, + 0.49, + 0.228 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "title", + "bbox": [ + 0.118, + 0.082, + 0.406, + 0.099 + ], + "angle": 0, + "content": "A Evaluations on Coding Tasks" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.109, + 0.488, + 0.221 + ], + "angle": 0, + "content": "We evaluate our proposed LEAD-14B model against the original DeepSeek-R1-Distill-Qwen-14B baseline on the LiveCodeBench benchmark under a maximum sequence length of 8k tokens. The dataset version used is release_v5, consisting of 880 code generation tasks. Results are summarized in Table 3." + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.224, + 0.49, + 0.464 + ], + "angle": 0, + "content": "As shown above, LEAD-14B achieves higher accuracy (0.5156 vs. 0.5103) while producing slightly longer completions. This suggests that our method enhances reasoning capability in code generation. Regarding the observed increase in chain-of-thought (CoT) length, we hypothesize that this effect arises because our training focused exclusively on mathematical reasoning datasets. While our method compresses reasoning paths in math domains, such compression does not appear to generalize as effectively to code. Combined with the improved reasoning capability that may increase the overall reasoning path, this may explain why generated sequences are overall longer in coderelated tasks." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.477, + 0.435, + 0.51 + ], + "angle": 0, + "content": "B Detailed Analysis on AIME25 by Difficulty" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.519, + 0.488, + 0.614 + ], + "angle": 0, + "content": "To further analyze model performance, we stratified the AIME25 dataset into three difficulty levels based on the problem number: normal (problems 1-5), difficult (problems 6-10), and highly difficult (problems 11-15). The detailed evaluation results for each stratum are presented in Table 4." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.616, + 0.489, + 0.793 + ], + "angle": 0, + "content": "The stratified results in Table 4 support the hypothesis that advantage reweighting enhances a model's ability to solve more difficult problems. This is evidenced by the widening performance gap in Pass@1 between GRPO+L and GRPO+LAD as problem difficulty increases. For normal problems, GRPO+LAD offers a modest \\(1.95\\%\\) improvement over GRPO+L. 
This margin increases substantially to \\(13.7\\%\\) for difficult problems, indicating that the benefits of advantage reweighting are more pronounced in challenging scenarios." + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.794, + 0.489, + 0.922 + ], + "angle": 0, + "content": "For highly difficult problems, the Pass@1 scores for GRPO+L and GRPO+LAD are identical. Neither method incorporates an explicit penalty for incorrect answers, making them susceptible to generating numerous wrong solutions. This tendency leads to unstable majority voting-based accuracy (Cons@32), a vulnerability that is magnified by the intrinsic difficulty of the problems." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.083, + 0.884, + 0.308 + ], + "angle": 0, + "content": "In contrast, the introduction of an explicit penalty in GRPO+LEAD demonstrates a clear regularization effect. On the most difficult problem set, GRPO+LEAD achieves the highest accuracy (Cons@32 of 0.4) and more than doubles the precision of both GRPO+L (0.172) and GRPO+LAD (0.156); the number of correct answers generated by GRPO+LEAD is comparable to both GRPO+L and GRPO+LAD, despite generating much fewer total answers. This validates our hypothesis that the explicit penalty effectively \"regularizes thinking\", discouraging the kind of hasty and incorrect responses that the length reward tends to encourage otherwise." + }, + { + "type": "title", + "bbox": [ + 0.51, + 0.322, + 0.822, + 0.354 + ], + "angle": 0, + "content": "C Qualitative Analysis of Solution Conciseness" + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.364, + 0.884, + 0.476 + ], + "angle": 0, + "content": "To provide a qualitative illustration of how the length reward enhances conciseness, we contrast the shortest correct solutions generated by GRPO+L and the baseline Deepseek-7B for the same problem (Problem 3, AIME 25 I). Table 5 breaks down the comparison across key aspects of readability and reasoning structure." + }, + { + "type": "text", + "bbox": [ + 0.508, + 0.477, + 0.884, + 0.637 + ], + "angle": 0, + "content": "As the comparison highlights, the GRPO+L model produces a tight, step-by-step solution that remains focused, avoids repetition, and concludes efficiently. In contrast, the Deepseek-7B baseline's reasoning path is less direct, characterized by repeated self-checks and conversational digressions that nearly double the total length and reduce clarity. This case study demonstrates that our length-reward mechanism successfully encourages a more disciplined and economical reasoning style." + } + ], + [ + { + "type": "table", + "bbox": [ + 0.122, + 0.085, + 0.875, + 0.149 + ], + "angle": 0, + "content": "
<tr><td>Model</td><td>Accuracy</td><td>Avg. Tokens (Overall)</td><td>Easy</td><td>Medium</td><td>Hard</td></tr>
<tr><td>LEAD-14B</td><td>0.5156</td><td>6322</td><td>3998</td><td>6912</td><td>8000</td></tr>
<tr><td>DeepSeek-R1-Distill-Qwen-14B</td><td>0.5103</td><td>5794</td><td>3046</td><td>6429</td><td>7856</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.159, + 0.882, + 0.189 + ], + "angle": 0, + "content": "Table 3: Performance on LiveCodeBench (release_v5) with maximum sequence length of 8k tokens. All token counts are rounded to the nearest integer." + }, + { + "type": "table", + "bbox": [ + 0.174, + 0.202, + 0.825, + 0.508 + ], + "angle": 0, + "content": "
<tr><td>Model</td><td>Cons@32</td><td>Avg. Correct</td><td>Avg. Answer</td><td>Precision</td><td>Pass@1</td></tr>
<tr><td colspan=6>Normal Problems (1–5)</td></tr>
<tr><td>Deepseek-7B</td><td>0.8</td><td>18.8</td><td>20.3</td><td>0.708</td><td>0.588</td></tr>
<tr><td>GRPO + L</td><td>0.8</td><td>19.7</td><td>27.6</td><td>0.631</td><td>0.616</td></tr>
<tr><td>GRPO + LAD</td><td>0.9</td><td>20.1</td><td>26.9</td><td>0.687</td><td>0.628</td></tr>
<tr><td>GRPO + LEAD</td><td>0.8</td><td>22.0</td><td>24.5</td><td>0.723</td><td>0.688</td></tr>
<tr><td colspan=6>Difficult Problems (6–10)</td></tr>
<tr><td>Deepseek-7B</td><td>0.4</td><td>8.3</td><td>13.8</td><td>0.404</td><td>0.259</td></tr>
<tr><td>GRPO + L</td><td>0.5</td><td>8.6</td><td>24.1</td><td>0.412</td><td>0.269</td></tr>
<tr><td>GRPO + LAD</td><td>0.6</td><td>9.8</td><td>24.2</td><td>0.448</td><td>0.306</td></tr>
<tr><td>GRPO + LEAD</td><td>0.6</td><td>9.7</td><td>20.0</td><td>0.421</td><td>0.303</td></tr>
<tr><td colspan=6>Highly Difficult Problems (11–15)</td></tr>
<tr><td>Deepseek-7B</td><td>0.2</td><td>0.9</td><td>2.0</td><td>0.230</td><td>0.028</td></tr>
<tr><td>GRPO + L</td><td>0.3</td><td>1.3</td><td>13.9</td><td>0.172</td><td>0.041</td></tr>
<tr><td>GRPO + LAD</td><td>0.2</td><td>1.3</td><td>14.6</td><td>0.156</td><td>0.041</td></tr>
<tr><td>GRPO + LEAD</td><td>0.4</td><td>1.5</td><td>7.7</td><td>0.355</td><td>0.047</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.518, + 0.884, + 0.561 + ], + "angle": 0, + "content": "Table 4: Consolidated evaluation results on AIME25, stratified by problem difficulty. Avg. Answers refers to the number of outputs that have completed within the 8k token budget and produce some, where as Avg. Correct refers to the correct answers. Precision is Avg. Correct/ Avg. Answers." + }, + { + "type": "table", + "bbox": [ + 0.124, + 0.574, + 0.875, + 0.876 + ], + "angle": 0, + "content": "
<tr><td>Aspect</td><td>GRPO+L</td><td>Deepseek-7B</td></tr>
<tr><td>Structure & flow</td><td>“Step 1: enumerate all possible triples ... Step 2: compute the multinomial coefficient ... Step 3: sum and mod.”</td><td>“Okay ... let me parse this step by step ... but wait, hold on ... let me verify the triples again ...”</td></tr>
<tr><td>Redundancy</td><td>“...hence, all possible triples: (6, 2, 1), (5, 3, 1), (4, 3, 2).”</td><td>“So, the possible triples ... So, three triples in total ... Wait, hold on, let me check if there are more ... So, total three triples.”</td></tr>
<tr><td>Conciseness of language</td><td>“Total N = 2016. Therefore, the remainder is 16.”</td><td>“Wait, hold on a second. ... Maybe I can think of all possible partitions ... No, I think the only possible triples are the three we found.”</td></tr>
<tr><td>Logical signposting</td><td>“Case 1: s = 1 ... Case 2: s = 2 ... Case 3: s = 3 (no solutions).”</td><td>“Case 1: S = 1 ... Subcase 1a ... Subcase 1b ... (digression) ... Case 3: S = 3 ... no solutions ... (returns to earlier cases).”</td></tr>
<tr><td>Error-checking</td><td>“Only three possible triples, so the computation is complete.”</td><td>“Wait, hold on a second. Is that all? ... let me verify the triples again ... maybe there are other triples?”</td></tr>
<tr><td>Length</td><td>Entire solution ≈ 200 words.</td><td>Entire solution ≈ 370 words (many repeated sentences such as “So, I think 16 is the answer”).</td></tr>
" + }, + { + "type": "table_caption", + "bbox": [ + 0.113, + 0.886, + 0.884, + 0.916 + ], + "angle": 0, + "content": "Table 5: Qualitative comparison of the shortest correct rollouts from GRPO+L and Deepseek-7B for AIME 25 I, Problem 3. Italicized text in the Deepseek-7B column represents meta-commentary or self-correction loops." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_origin.pdf b/data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..825698e110e4405222c141113a2f01311b72d5a9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/51883cd5-a1e4-420d-872b-48483ba1aaba_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ed4b013aa92adc271cce292575af8ec2872addf4cce9ff7457a7573ccad5921 +size 754025 diff --git a/data/2025/2504_09xxx/2504.09696/full.md b/data/2025/2504_09xxx/2504.09696/full.md new file mode 100644 index 0000000000000000000000000000000000000000..a800d0932f11bc2857678ac89d0bde00d11a08d6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/full.md @@ -0,0 +1,320 @@ +# GRPO-LEAD: A Difficulty-Aware Reinforcement Learning Approach for Concise Mathematical Reasoning in Language Models + +Jixiao Zhang* Johns Hopkins University jzhan432@jh.edu + +Chunsheng Zuo* Johns Hopkins University czuo3@jh.edu + +# Abstract + +Group Relative Policy Optimization (GRPO), which is widely adopted by R1-like reasoning models, has advanced mathematical reasoning. Nevertheless, GRPO faces challenges in reward sparsity, verosity, and inadequate focus on problem difficulty. We propose GRPO-LEAD, enhancing GRPO with: (1) length-regularized rewards to encourage conciseness while maintaining accuracy; (2) explicit penalties for incorrect solutions to improve model precision; and (3) difficulty-aware advantage reweighting for robust generalization on challenging problems. Comprehensive evaluations demonstrate that GRPO-LEAD significantly improves reasoning accuracy, conciseness, and efficiency. Our approach achieves state-of-the-art performance for 14B-scale models, underscoring the synergy of our methods with appropriate model scale and high-quality data. Our source code, generated dataset, and models are available at https://github.com/aeroplanepaper/GRPO-LEAD. + +# 1 Introduction + +Recently, R1-like reasoning models have attracted significant attention due to their impressive performance in solving challenging mathematical reasoning tasks through extensive chains of thought (Luo et al., 2025b; Wen et al., 2025). According to the technical report introducing R1 (Guo et al., 2025), reinforcement learning (RL) fine-tuning plays a pivotal role in enabling this reasoning capability. In particular, Group Relative Policy Optimization (GRPO) (Shao et al., 2024), a novel RL approach for language models, has emerged as a promising alternative to traditional methods such as PPO (Schulman et al., 2017) and DPO (Rafailov et al., 2023), primarily due to its efficiency and intrinsic compatibility with language model training. Researchers across various domains have successfully employed GRPO (Li et al., 2025; Liu et al., + +2025a; Luo et al., 2025a; Dai et al., 2025), achieving impressive outcomes. + +Despite its strengths, existing GRPO implementations encounter significant limitations. 
A primary issue is reward sparsity stemming from binary, rule-based accuracy metrics; when responses within problem groups exhibit uniform correctness or incorrectness, the resulting uniform reward signals offer minimal differentiation, weakening learning gradients and hampering convergence. Moreover, such uniform signals inadequately promote concise reasoning, leading to unnecessarily verbose outputs and inefficiencies during training and inference. Additionally, the current reward formulation lacks explicit penalties for incorrect answers (Hu et al., 2025a; Luo et al., 2025b; Chu et al., 2025), inadvertently encouraging models to guess rather than engage in rigorous reasoning, thereby compromising precision. Furthermore, rewards are applied uniformly across problems regardless of their intrinsic difficulty, causing models to excessively optimize simpler tasks while neglecting more challenging problems that require deeper reasoning. + +Furthermore, computational efficiency also emerges as a critical practical concern, as reinforcement learning fine-tuning typically demands substantial resources, limiting accessibility, experimentation speed, and scalability, especially in low-resource environments. The current GRPO formulation is insufficient for encouraging concise and precise reasoning. Consequently, reducing computational requirements during both training and inference is essential for enabling broader applicability and effective real-world deployment. + +Motivated by these limitations, this work introduces GRPO-LEAD, a suite of targeted modifications explicitly designed to enhance GRPO's effectiveness for mathematical reasoning tasks. The overall framework is illustrated in Figure 1. Our key contributions include: + +![](images/9897cdeb9ea0239d5c8fe13664f290f1ecf099b758915f70595281e0aa979e0d.jpg) +Figure 1: The GRPO-LEAD framework assigns length-regularized positive rewards to correct answers and explicit penalties to incorrect ones. A difficulty-based weight $w$ used for advantage reweighting is determined from the empirical correctness of responses for each question. This weight then scales the advantages derived from each question, prioritizing harder questions over easier ones during the policy update to foster robust reasoning. + +- We introduce a length-regularized reward with an explicit penalty for incorrect solutions to encourage solution conciseness while maintaining accuracy. +- We apply difficulty-aware advantage reweighting to focus learning on more challenging problems, fostering robust generalization. +- Our comprehensive evaluations demonstrate GRPO-LEAD significantly improves reasoning accuracy and conciseness, achieving state-of-the-art performance in mathematical reasoning for 14B-scale models. + +# 2 Related Work + +# 2.1 Group Relative Policy Optimization + +Group Relative Policy Optimization (GRPO) is a recently proposed algorithm designed specifically for fine-tuning language models with group-level normalization of rewards (Guo et al., 2025). GRPO modifies the standard policy gradient objective by introducing relative advantages within sets of responses corresponding to the same query, stabilizing updates and promoting consistent learning signals. Formally, GRPO defines the objective as: + +$$ +\begin{array}{l} \mathcal {L} _ {\mathrm {G R P O}} (\theta) = \frac {1}{G} \sum_ {i = 1} ^ {G} \frac {1}{| o _ {i} |} \sum_ {t = 1} ^ {| o _ {i} |} \left[ \min \left(r _ {i, t} (\theta) \hat {A} _ {i, t}, \right. \right. \tag {1} \\ \left. 
\operatorname {c l i p} \left(r _ {i, t} (\theta), 1 - \epsilon , 1 + \epsilon) \hat {A} _ {i, t}\right) \right] \\ \end{array} +$$ + +where the importance sampling ratio is given by + +$$ +r _ {i, t} (\theta) = \frac {\pi_ {\theta} \left(o _ {i , t} \mid q , o _ {i , < t}\right)}{\pi_ {\theta_ {\mathrm {o l d}}} \left(o _ {i , t} \mid q , o _ {i , < t}\right)}. \tag {2} +$$ + +Here, $G$ denotes the number of groups (e.g., different queries), $\hat{A}_{i,t}$ is the normalized advantage within group $i$ , and $\epsilon$ defines the clipping range for conservative updates. + +# 2.2 Length Reward + +A prevalent issue in reinforcement learning-based fine-tuning of language models is reward hacking (Everitt et al., 2017; Gao et al., 2023; Weng, 2024). In GRPO, when the model is trained with a large fixed budget, it can exploit this budget by producing an excessive number of extra reasoning and verification steps to ensure the correctness of the answer and therefore reach a higher reward. This phenomenon leads to unnecessarily verbose responses that lack conciseness and hinder interpretability, resulting in inefficiency in reasoning and reducing the model's practicality. + +Existing efforts to mitigate this problem typically involve incentivizing shorter answers to encourage more succinct reasoning processes. For example, Kimi proposed an individual min-max normalized length reward based on the lengths of generated responses (Team et al., 2025). Yeo et al. introduced a cosine length reward function with fixed maximum and minimum thresholds to manage response lengths (Yeo et al., 2025). Aggarwal et al. + +utilized a target "golden length" to directly reward or penalize responses based on their deviation from an ideal length (Aggarwal and Welleck, 2025). + +However, these existing methods depend heavily on static or predefined length heuristics, limiting their effectiveness across diverse questions of varying complexity. In contrast, our proposed length-dependent accuracy reward addresses these limitations by dynamically calibrating rewards according to each group's relative response length and rollout accuracy, promoting concise yet difficulty-aware reasoning processes. + +# 3 Method + +To systematically address the limitations identified in existing implementations of Group Relative Policy Optimization (GRPO), we propose a suite of novel modifications collectively termed GRPO-LEAD (GRPO with Length-dependent rewards, Explicit penalties, and Advantage reweighting for Difficulty). Our proposed method enhances the original GRPO framework by introducing three core innovations: 1) a length-dependent accuracy reward to foster concise solutions, 2) an explicit penalty mechanism to mitigate low precision rate caused by length reward, and 3) a difficulty-aware advantage reweighting strategy that amplifies learning signals for challenging problems. Additionally, we examine how base model scale and supervised fine-tuning (SFT) impact the effectiveness of reinforcement learning (RL) fine-tuning. + +# 3.1 Length-Dependent Accuracy Reward + +The core idea is to reward correct completions not uniformly but in proportion to their relative conciseness. Given a question $q$ and a set of model-generated responses $\{o_i\}$ , we first isolate the subset of correct responses and compute the mean $\mu$ and standard deviation $\sigma$ of their token lengths. 
For a correct response $o$ with length $|o|$ , we define its standardized length deviation as: + +$$ +z = \frac {| o | - \mu}{\sigma + \epsilon}, \tag {3} +$$ + +where $\epsilon > 0$ is a small constant added for numerical stability. The final reward is modulated using an exponential decay function: + +$$ +R _ {\text {a c c u r a c y}} (o | q) = \left\{ \begin{array}{l l} \exp (- \alpha z), & \text {i f o i s c o r r e c t ,} \\ 0, & \text {i f o i s i n c o r r e c t .} \end{array} \right. \tag {4} +$$ + +where $\alpha > 0$ is a tunable hyperparameter controlling the strength of length penalization. + +This formulation ensures that overly long correct responses are systematically penalized, while relatively concise ones are amplified. Unlike static or absolute length constraints, our approach leverages standardized deviation, allowing for dynamic adaptation to the distributional properties of each question. + +# 3.2 Explicit Penalty for Incorrect Answers to Enhance True Accuracy + +Existing methods often prioritize maximizing $\text{pass} @ 1$ — the success rate on the first attempt—typically within restricted response lengths. However, this focus can inadvertently degrade overall model accuracy. The fundamental issue appears to stem from the use of a binary accuracy reward, rather than length-based regularization: under pressure to generate responses within a limited length, a model is encouraged to provide an answer, even if it's a guess, rather than no answer at all. Such guesses can achieve a non-zero reward and inflate $\text{pass} @ 1$ , but they do so at the cost of overall precision by rewarding less rigorous reasoning. + +To counteract this tendency and foster a more robust distinction between correct and incorrect outputs, we introduce a revised reward structure that explicitly penalizes incorrect responses. This new reward function is defined as: + +$$ +R _ {\text {a c c u r a c y}} (o \mid q) = \left\{ \begin{array}{l l} \exp (- \alpha z), & \text {i f o i s c o r r e c t ,} \\ - 1, & \text {i f o i s i n c o r r e c t ,} \end{array} \right. \tag {5} +$$ + +where $o$ is the output, $q$ is the question, $z$ represents the standardized length deviation of a correct response, and $\alpha > 0$ is a hyperparameter controlling the strength of the length penalization for correct answers, consistent with prior definitions. + +The expected reward for a response, given its probability of correctness $P(\text{correct})$ , under this formulation is: + +$$ +\begin{array}{l} \mathbb {E} \left[ R _ {\text {a c c u r a c y}} (o \mid q) \right] = P (\text {c o r r e c t}) \cdot \exp (- \alpha z) \\ - (1 - P (\text {c o r r e c t})) \tag {4} \\ \end{array} +$$ + +To intuitively grasp the impact of this reward function, let us consider a simplified scenario where the length penalty for correct answers is negligible (i.e., $\exp (-\alpha z)\approx 1$ ). In practice, the average reward for correct answers often normalizes close to this value. Under this assumption, the expected reward + +simplifies to: + +$$ +\mathbb {E} [ R ] \approx 2 P (\text {c o r r e c t}) - 1 \tag {5} +$$ + +This approximation reveals a crucial characteristic: the expected reward becomes positive only when $P(\mathrm{correct}) > 0.5$ . This threshold acts as a principled deterrent against speculative guessing, compelling the model to internalize a more stringent decision boundary for correctness. 
Our empirical results confirm that this approach significantly improves both $pass@1$ and overall precision, encouraging the model to favor accuracy over mere completion. + +# 3.3 Advantage Reweighting for Difficulty-Aware Training + +While length reward and advantage reweighting can enhance precision and mitigate morbidity, uniformly applying rewards across all questions, irrespective of their intrinsic difficulty, may implicitly bias the model. It might learn to excessively optimize performance on simpler tasks—where correct and concise responses are more readily achieved—while neglecting more complex questions that demand deeper reasoning. Consequently, the performance on challenging problems can degrade. + +Therefore, we introduce a difficulty-aware advantage reweighting strategy, which dynamically adjusts the magnitude of policy updates based on an estimate of problem difficulty. The intuition is to amplify learning signals for harder tasks, re-anchoring the model towards harder tasks. + +Formally, we first quantify problem difficulty. For a given question $q$ and its associated set of sampled responses $\{o_i\}$ , we define the group's empirical correctness ratio as: + +$$ +\rho_ {q} = \frac {\text {n u m b e r o f c o r r e c t r e s p o n s e s f o r} q}{\text {t o t a l n u m b e r o f r e s p o n s e s f o r} q}. \tag {6} +$$ + +This ratio, $\rho_{q}$ , serves as an inverse proxy for problem difficulty: a lower $\rho_{q}$ suggests a harder question. + +Next, we introduce a logistic reweighting factor dependent on this ratio to modulate the advantage estimates during the RL training step. The logistic function is defined as: + +$$ +w \left(\rho_ {q}\right) = A + \frac {B - A}{1 + \exp \left[ k \left(\rho_ {q} - \rho_ {0}\right) \right]}, \tag {7} +$$ + +where hyperparameters $A, B, \rho_0, k$ allow precise control over the sensitivity of weighting to problem difficulty. + +To apply this reweighting, we first consider the normalized advantage estimate for a response $o_i$ to question $q$ : + +$$ +\tilde {A} _ {i} = \frac {R \left(o _ {i} | q\right) - \mu_ {q}}{\sigma_ {q} + \epsilon}, \tag {8} +$$ + +where $\mu_q$ and $\sigma_q$ are the mean and standard deviation of rewards $R(o_i|q)$ for responses to question $q$ , and $\epsilon$ is a small constant for numerical stability. We then define the difficulty-aware advantage, $A_i'$ , as: + +$$ +A _ {i} ^ {\prime} = \tilde {A} _ {i} \cdot \left\{ \begin{array}{l l} w \left(\rho_ {q}\right), & \text {i f} \tilde {A} _ {i} > 0 \\ w \left(1 - \rho_ {q}\right), & \text {i f} \tilde {A} _ {i} \leq 0 \end{array} \right. \tag {9} +$$ + +This formulation ensures that for difficult problems (low $\rho_{q}$ ), correct responses (which are rare and thus highly valuable) receive substantially larger updates due to the increased weighting $w(\rho_q)$ . Conversely, incorrect responses on easier problems (high $\rho_{q}$ ) are penalized more strongly, sharpening the decision boundary for problems where high performance should be expected. + +# 3.4 Impact of Data Quality on Reinforcement Learning Effectiveness + +To further enhance model capabilities, we first performed supervised fine-tuning (SFT) on a specialized dataset of 13k math reasoning problems sourced from DeepScaler (Luo et al., 2025b) (including historical AMC, AIME, and OmniMath problems) with solutions generated by QwQ32B (Team, 2025). Although this SFT model initially showed signs of overfitting, subsequent application of our proposed RL strategies rapidly mitigated these issues. 
This SFT+RL approach yielded faster convergence and significantly improved pass@1 accuracy and overall precision compared to applying RL directly to the original base model. + +Our findings also highlight the critical role of data quality and curriculum strategies in RL. We established a robust initial policy by applying RL to a subset of challenging problems from the DeepScaler dataset. This policy was then further refined using a curriculum composed of the most challenging problems identified from this first RL stage and supplemented by high-difficulty examples from the Light-R1 dataset (Wen et al., 2025). This two-stage curriculum markedly enhanced the model's ability to continuously improve on complex tasks. + +Finally, we addressed a persistent formatting issue of repetitive n-gram patterns, likely stemming from an absence of clear end-of-sequence (EOS) signals during SFT. By temporarily removing length-dependent rewards and introducing an explicit negative reward $(-1.5)$ for such repeated ngrams, we achieved further improvements in precision and pass@1 metrics. This intervention demonstrates the effectiveness of targeted reward modifications for mitigating specific output anomalies. + +In summary, our experiments affirm that initial model capacity, curated data curricula for RL, and targeted reward engineering are pivotal for optimizing fine-tuning outcomes. These elements collectively inform a systematic approach for enhancing language models' ability to produce concise, accurate, and well-structured responses across tasks of varying complexity. + +# 4 Experimental Setup + +We evaluate GRPO-LEAD, integrating length-dependent accuracy rewards, explicit penalties for incorrect solutions, and difficulty-aware advantage reweighting, on DEEPSEEK-R1 DISTILLED variants (Guo et al., 2025; Yang et al., 2024). Our experiments cover two model scales, 7B and 14B parameters. All GRPO training is conducted using the VERL framework.(Sheng et al., 2024). + +# 4.1 Datasets and Filtering + +Our primary training data is sourced from the DEEPSCALER dataset (Luo et al., 2025b). We filter out problems with difficulty ratings below 2.5, resulting in approximately 9,000 questions for fine-tuning. + +For stage 2 of our 14B model experiments, we further refine the dataset by selecting problems where the model's stage-1 rollout accuracy is no greater than $75\%$ , yielding around 2,283 questions. Additionally, we incorporate challenging problems with numeric answers from the stage-2 dataset of Light-R1 (Wen et al., 2025), resulting in 3,524 questions in total. + +# 4.2 Hyperparameters + +We train with a learning rate of $1 \times 10^{-6}$ , batch size 32, and group size 8—generating 8 rollouts per question for GRPO reward computation. The KL penalty term is removed, as it was found to suppress exploration in our experiments, which is also suggested in similar works (Liu et al., 2025b; Hu et al., 2025b). + +For the length-dependent accuracy reward, we set $\alpha = 0.05$ , providing a moderate decay that encourages conciseness without penalizing slight morbidity. For difficulty-aware advantage reweighting, we use $A = 0.4$ , $B = 1.5$ , $\rho_0 = 0.75$ , and $k = 10$ . This configuration ensures reweighting is minimal on easy problems but increases sharply near the $75\%$ correctness threshold. The steep slope ( $k = 10$ ) enables strong emphasis on high-difficulty examples, guiding the model to allocate learning more effectively. 
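For concreteness, the reward and reweighting rules configured above can be sketched in a few lines of Python. This is only an illustrative sketch of Eqs. (3)–(5) and (7)–(9) under the stated hyperparameters ($\alpha = 0.05$, $A = 0.4$, $B = 1.5$, $\rho_0 = 0.75$, $k = 10$); the function names and the toy rollout group at the end are our own and do not come from the released GRPO-LEAD code.

```python
import math
from typing import List

# Hyperparameters as reported in Section 4.2.
ALPHA = 0.05                             # length-decay strength
A, B, RHO_0, K = 0.4, 1.5, 0.75, 10.0    # logistic reweighting parameters


def length_regularized_rewards(correct: List[bool], lengths: List[int],
                               eps: float = 1e-6) -> List[float]:
    """Length-dependent accuracy reward with an explicit -1 penalty (Eqs. 3-5)."""
    correct_lens = [n for n, c in zip(lengths, correct) if c]
    if not correct_lens:
        # No correct rollout in the group: every response gets the penalty.
        return [-1.0] * len(lengths)
    mu = sum(correct_lens) / len(correct_lens)
    sigma = math.sqrt(sum((n - mu) ** 2 for n in correct_lens) / len(correct_lens))
    rewards = []
    for n, c in zip(lengths, correct):
        if c:
            z = (n - mu) / (sigma + eps)         # standardized length deviation
            rewards.append(math.exp(-ALPHA * z))
        else:
            rewards.append(-1.0)                 # explicit penalty for wrong answers
    return rewards


def difficulty_weight(rho_q: float) -> float:
    """Logistic reweighting factor w(rho_q) from Eq. 7."""
    return A + (B - A) / (1.0 + math.exp(K * (rho_q - RHO_0)))


def reweighted_advantages(rewards: List[float], rho_q: float,
                          eps: float = 1e-6) -> List[float]:
    """Group-normalized advantages scaled by the difficulty weight (Eqs. 8-9)."""
    mu = sum(rewards) / len(rewards)
    sigma = math.sqrt(sum((r - mu) ** 2 for r in rewards) / len(rewards))
    advantages = []
    for r in rewards:
        a_tilde = (r - mu) / (sigma + eps)
        w = difficulty_weight(rho_q) if a_tilde > 0 else difficulty_weight(1.0 - rho_q)
        advantages.append(a_tilde * w)
    return advantages


# Toy group of 8 rollouts for one question (5 of 8 correct).
correct = [True, True, False, True, True, False, True, False]
lengths = [5200, 6100, 8000, 4800, 5600, 7900, 6500, 8000]
rewards = length_regularized_rewards(correct, lengths)
advantages = reweighted_advantages(rewards, rho_q=sum(correct) / len(correct))
```

With these settings the weight stays near $A = 0.4$ for questions the policy already solves reliably and rises toward $B = 1.5$ as the group correctness ratio drops below $\rho_0 = 0.75$, which is the behavior described above.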
+ +# 4.3 Model Variants and Fine-Tuning Stages + +7B Model Experiments Starting from the DeepSeek-R1 Distilled 7B Qwen-Math checkpoint, we first apply standard GRPO on the 9k questions, producing a baseline. Then, we train three more models from the DeepSeek-R1 Distilled 7B QwenMath checkpoint, adding one more of the following components subsequently: (i) Length Reward only, (ii) Length Reward + Advantage Reweighting, (iii) Length Reward + Advantage Reweighting + Explicit Penalty. We train for approximately 200 steps and select the top-performing checkpoints based on validation results. At test time, we limit the generation length to 8k for all 7B models, matching the training length limit. + +14B Model Experiments We extend the above procedure to the DeepSeek-R1 Distilled 14B Qwen checkpoint across multiple stages. In Stage 1, we train for 100 steps using all GRPO-LEAD components on the filtered 9k-question dataset. To enhance the model's base capability, we first fine-tune the model on a curated set of 13k math problems with supervised fine-tuning (SFT), then conduct the RL phase. This SFT stage significantly improves the model's reasoning quality, even though it tends to increase the output length and caused some format errors. + +The SFT data consists of all problems in the DEEPSCALER dataset with difficulty greater than 1. To construct high-quality reasoning traces for SFT, we use the QWQ-32B model (Team, 2025) to generate step-by-step solutions. + +After observing that some questions remain low correctness, we further fine-tune for Stage 2 to focus on those underperformed problems. We also address the repetitive output patterns by removing the length penalty and introducing a negative reward $(-1.5)$ for repeated $n$ -grams. We continue training for 240 more steps (100 steps with initial settings + +and 140 more steps with repetition penalty), yielding the final model checkpoint. At test time, we limit the generation length to 14k for all 14B models, in accordance with our training settings and also to better compare the models' performance in a low-budget scenario. + +# 4.4 Baselines and Evaluation Protocol + +We compare our models with both DEEPSEEK-R1 distilled-14B-Qwen (Guo et al., 2025) (the distilled Qwen model without GRPO-LEAD) and LIGHT-R1-14B-DS (Wen et al., 2025), which has the same base model as ours and was first finetuned with 3k hard math problems with SFT, and then fine-tuned with a cosine-based length reward (Yeo et al., 2025) on their selected math problems for three epochs using GRPO. + +We primarily report three metrics: (1) Cons@32, accuracy through majority voting for 32 samplings; (2) Pass@1, the probability that the top-1 sample is correct under a chosen decoding strategy; (3) Average Length $(\mathrm{Len}_{\mathrm{avg}})$ , measuring morbidity. Unless otherwise specified, we decode with temperature 0.6 and sample 32 solutions per question, then compute Cons@32 and Pass@1 over these samples. + +# 5 Results + +In this section, we present a comprehensive evaluation of the proposed GRPO-LEAD framework on two mathematical benchmarks: AIME24 and AIME25. Our analysis is structured as follows: we first examine training dynamics to illustrate how GRPO-LEAD accelerates convergence; next, we perform an ablation study to assess the incremental benefits of each component; and finally, we compare against state-of-the-art baselines for 14B-scale language models. 
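Before turning to the results, the three metrics from Section 4.4 can be made concrete with a short sketch. This is an assumption-laden illustration rather than the actual evaluation harness: exact string comparison against a hypothetical `gold` answer stands in for the answer-equivalence checking a real scorer would need, and all helper names are our own.

```python
from collections import Counter
from typing import List


def cons_at_k(sampled_answers: List[str], gold: str) -> float:
    """Cons@k: correctness of the majority-voted answer over k samples."""
    majority_answer, _ = Counter(sampled_answers).most_common(1)[0]
    return float(majority_answer == gold)


def pass_at_1(sampled_answers: List[str], gold: str) -> float:
    """Pass@1 estimated as the fraction of sampled answers that are correct."""
    return sum(answer == gold for answer in sampled_answers) / len(sampled_answers)


def len_avg(token_counts: List[int]) -> float:
    """Average completion length in tokens."""
    return sum(token_counts) / len(token_counts)


# Toy example with 32 sampled final answers for one question.
samples = ["16"] * 20 + ["17"] * 12
print(cons_at_k(samples, gold="16"))   # 1.0: the majority vote is correct
print(pass_at_1(samples, gold="16"))   # 0.625
```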
+ +# 5.1 Training Dynamics + +Figure 2 plots the evolution of Pass@1 on a validation split over training steps for three configurations of the 7B model: (i) baseline GRPO, (ii) GRPO with length reward, and (iii) GRPO with both length reward and advantage reweighting. We observe two clear trends. First, adding a length-dependent reward not only yields higher Pass@1 but also accelerates early-stage convergence, suggesting that penalizing overly verbose correct solutions provides a more informative learning signal. + +![](images/59333a80607e2d683bc530ea9c4bd43634e70dd8669f65a5c324f3f4a2e39e18.jpg) +Figure 2: Validation* Pass@1 over training steps for three configurations: GRPO, GRPO+L, and GRPO+LAD. As shown by the faster convergence, length reward and advantage reweighting provide a richer reward signal signal than the original setup. + +Second, incorporating advantage reweighting (to amplify updates on harder questions) further steepens the trajectory, indicating that reweighting advantage estimates according to problem difficulty helps the model refine reasoning on challenging prompts more efficiently. + +Overall, these dynamics confirm that GRPO-LEAD components—particularly the length reward—bolster training stability and speed. By comparison, the baseline GRPO model learns more slowly and lags behind in Pass@1 across the entire training horizon. + +# 5.2 Ablation Analysis + +We next quantify the contribution of each GRPO-LEAD component through a step-by-step ablation on the 7B model. Table 1 summarizes results on AIME24 and AIME25. + +Length Reward Brings Conciseness to Reasoning We first incorporate the length-dependent accuracy reward into GRPO. Compared to Deepseek-7B, length reward slightly improves Pass@1 on both AIME24 by $1.6\%$ ( $0.431 \rightarrow 0.438$ ) and AIME25 by $5.4\%$ ( $0.292 \rightarrow 0.308$ ), with an additional improvement of Cons@32 by $14.1\%$ on AIME25. Notably, these improvements are accompanied by a substantial reduction of 1,715 tokens ( $24.5\%$ ) and 1,903 tokens ( $26.8\%$ ) in the average response length on the two datasets, respectively. Figure 3 further demonstrates that length reward largely enhances performance in low-budget settings over the base model, matching its peak performance with only 5/8 of the token budget on the more difficult AIME25. These results demonstrate that length reward, by penalizing correct but overly verbose solutions, can effectively reduce unnec + +
| Ablation Setting | AIME24 Cons@32 | AIME24 Pass@1 | AIME24 Lenavg | AIME25 Cons@32 | AIME25 Pass@1 | AIME25 Lenavg |
| --- | --- | --- | --- | --- | --- | --- |
| Deepseek-7B | 0.767 | 0.431 | 6,990 | 0.467 | 0.292 | 7,113 |
| GRPO + len. reward | 0.767 | 0.438 | 5,275 | 0.533 | 0.308 | 5,210 |
| + adv. reweighting | 0.767 | 0.458 | 5,323 | 0.567 | 0.325 | 5,437 |
| + explicit penalty | 0.800 | 0.470 | 6,104 | 0.567 | 0.345 | 6,308 |
+ +Table 1: Ablation results on AIME24 and AIME25. We report Cons@32 (accuracy through majority voting over 32 samples), Pass@1, and the average token length (Lenavg). The best value in each column is in boldface, the second best is underlined. + +![](images/f58a619ee4c24ad833dcde93315c5269472b57b7d1effc226bf1d6b1b8ec1c8e.jpg) +(a) AIME24 + +![](images/f6114c3dc99b278ea5e4df33567114e0f0169d5067babb21babc251663674c47.jpg) +(b) AIME25 +Figure 3: Performance against inference budget for models trained with different ablations of LEAD. GRPO with length reward (GRPO+L) largely enhances performance at low budget settings compared to the base model before RL training (Deepseek-R1-7B). + +Advantage Reweighting Encourages Model to Solve More Difficult Problems Further incorporating difficulty-aware advantage reweighting (GRPO+LAD) refines performance. On AIME24, Pass@1 increases from the GRPO+L stage by $4.8\%$ ($0.438 \rightarrow 0.458$), while Cons@32 remains 0.767. For AIME25, both Pass@1 and Cons@32 improve, by $5.5\%$ ($0.308 \rightarrow 0.325$) and $6.4\%$ ($0.533 \rightarrow 0.567$), respectively. As Figure 3 shows, GRPO+LAD demonstrates gains over GRPO+L in almost all budget regimes on AIME25 and for budgets exceeding 5k tokens on AIME24. These results indicate that advantage reweighting, by prioritizing challenging problems, strengthens reasoning robustness and mitigates over-reliance on simpler examples, thus validating its role in driving more reliable generalization. + +Explicit Penalty for Incorrect Answers Regularizes Thinking Finally, introducing an explicit penalty for incorrect solutions (GRPO+LEAD) yields the highest Pass@1 scores. On AIME24, Pass@1 and Cons@32 improve from the GRPO+LAD stage by $2.6\%$ ($0.458 \to 0.470$) and $4.3\%$ ($0.767 \to 0.800$), respectively. On AIME25, Pass@1 also increases by $6.2\%$ ($0.325 \to 0.345$), as detailed in Table 1. Notably, these gains involve a modest increase in average solution length on AIME24 (from approximately 5,300 to 6,104 tokens). Figure 3 illustrates this trade-off, showing a performance sacrifice in low-budget regimes, though GRPO+LEAD still outperforms GRPO+LAD with budgets higher than 5k tokens on AIME25. These results suggest that the explicit penalty serves as a regularizer that makes the model more conservative in its reasoning. Such regularization boosts performance while requiring a slightly longer thinking process, which nevertheless remains shorter than the Deepseek-7B baseline.
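To see why the explicit penalty discourages guessing, recall from Section 3.2 that, with the $-1$ penalty and the length factor for correct answers close to 1, the expected reward of an answer with correctness probability $P$ is roughly $2P - 1$, which is positive only when $P > 0.5$. The tiny numeric check below illustrates this threshold; the function name and probabilities are purely illustrative.

```python
def expected_reward(p_correct, length_factor=1.0):
    """Expected reward under the explicit-penalty scheme: a correct answer
    earns exp(-alpha * z) (approximated here by length_factor ~ 1) and an
    incorrect answer earns -1, giving E[R] ~ 2 * p_correct - 1."""
    return p_correct * length_factor - (1.0 - p_correct)

# A speculative guess (30% chance of being correct) has negative expected
# value, while a carefully reasoned answer (70%) is still worth emitting.
print(expected_reward(0.3))  # -0.4
print(expected_reward(0.7))  #  0.4
```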
| Model Name | AIME24 Cons@32 | AIME24 Pass@1 | AIME24 Lenavg | AIME25 Cons@32 | AIME25 Pass@1 | AIME25 Lenavg |
| --- | --- | --- | --- | --- | --- | --- |
| DeepSeek-14B | 0.800 | 0.614 | 9,182 | 0.633 | 0.429 | 10,046 |
| Light-R1-14B-DS | 0.833 | 0.641 | 9,571 | 0.767 | 0.505 | 10,194 |
| LEAD-stage1 | 0.833 | 0.629 | 8,790 | 0.767 | 0.523 | 9,371 |
| LEAD-stage2 | 0.867 | 0.650 | 8,267 | 0.767 | 0.539 | 8,668 |
+ +Table 2: Comparison of model performance on AIME24 and AIME25, showing Cons@32, Pass@1, and average token length (Lenavg). The best value in each column is in boldface, the second best is underlined. + +Overall, these ablation results confirm that all three enhancements—length-dependent accuracy rewards, difficulty-aware advantage reweighting, and explicit penalties—collectively reduce verbosity, strengthen mathematical skills on harder questions, and elevate precision in final predictions. + +# 5.3 Comparison with Baselines + +We next evaluate GRPO-LEAD at the 14B scale and compare it against two strong baselines under a 14k-token generation budget: DeepSeek-14B and the state-of-the-art Light-R1-14B-DS. Table 2 presents results on AIME24 and AIME25, including both our intermediate model (LEAD-stage1) and our final model (LEAD-stage2). + +AIME24 Performance LEAD-stage1 achieves a Cons@32 of 0.833, matching Light-R1-14B-DS and exceeding DeepSeek-14B by $4.1\%$. Its Pass@1 outperforms DeepSeek-14B by $2.4\%$ and closely approaches Light-R1-14B-DS. Crucially, LEAD-stage1 produces more concise responses than both baselines, with more than 800 fewer tokens on average. Building on these gains, LEAD-stage2 pushes performance further, delivering the highest Cons@32 (4% above Light-R1-14B-DS) and the best Pass@1, while reducing the average solution length to 8,267 tokens. + +AIME25 Performance LEAD-stage1 yields a Cons@32 of 0.767, matching Light-R1-14B-DS and exceeding DeepSeek-14B by $21.2\%$. Its Pass@1 (0.523) outperforms DeepSeek-14B by $21.9\%$ and Light-R1-14B-DS by $3.6\%$. Crucially, LEAD-stage1 produces more concise responses than both baselines, with its solutions averaging 9,371 tokens. Building on these gains, LEAD-stage2 pushes performance further, delivering the highest Cons@32 (matching Light-R1-14B-DS at 0.767) and the best Pass@1 (0.539), while reducing the average solution length to 8,668 tokens. + +Overall, both LEAD-stage1 and LEAD-stage2 deliver substantial improvements over DeepSeek-14B and Light-R1-14B-DS, simultaneously boosting correctness and conciseness under a constrained (14k-token) budget. Remarkably, training LEAD-stage1 for just 100 steps—requiring only about 24 hours on eight H20 GPUs—already matches Light-R1-14B-DS on Cons@32 and outperforms it on AIME25 Pass@1 while producing shorter solutions, underscoring the practical efficiency of GRPO-LEAD for large-scale math problem-solving. + +# 6 Conclusion + +We introduced GRPO-LEAD, a reinforcement learning framework designed for mathematical reasoning tasks. By extending Group Relative Policy Optimization with three major components—(1) a length-dependent accuracy reward to discourage overly verbose solutions, (2) an explicit negative penalty that clarifies the boundary between correct and incorrect answers, and (3) a difficulty-aware advantage reweighting scheme to prioritize tougher problems—GRPO-LEAD addresses key challenges in structured problem-solving. + +Empirical evaluations on two AIME benchmarks show that GRPO-LEAD not only speeds up convergence but also strengthens the model's reasoning capability while keeping solution paths concise. Our 14B-scale experiments further confirm that GRPO-LEAD achieves state-of-the-art performance by balancing output brevity with high problem-solving accuracy.
Although open questions remain—particularly in managing partial correctness and extending these techniques to broader domains—our findings suggest that reward shaping and difficulty modeling are pivotal in developing more robust and aligned language models for complex mathematical reasoning. + +# 7 Limitations + +Although our techniques for encouraging concise solutions and difficulty-balanced learning may transfer to other domains, the gains reported here are specific to mathematical reasoning tasks. Further studies are needed to evaluate the effectiveness of GRPO-LEAD on broader question-answering or logical reasoning domains, where correctness signals and domain structures can differ substantially. + +Additionally, we have access to only a limited amount of compute, which prevents us from conducting more comprehensive experiments. For instance, we currently cannot provide the validation curve for the 7B model in the ablation study that adds an explicit penalty. This is due to an error in the validation code after upgrading to the newest VERL version, and we currently do not have the compute to reproduce it. A comparison with the original GRPO model is also missing, except for the curve shown in Figure 2, because the checkpoint was stored on a rented server that was automatically released as we were writing the paper. We also could not perform a formal hyperparameter search to justify the hyperparameters chosen for our proposed modifications. + +# References + +Pranjal Aggarwal and Sean Welleck. 2025. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697. +Xiangxiang Chu, Hailang Huang, Xiao Zhang, Fei Wei, and Yong Wang. 2025. Gpg: A simple and strong reinforcement learning baseline for model reasoning. arXiv preprint arXiv:2504.02546. +Muzhi Dai, Chenxu Yang, and Qingyi Si. 2025. S-grpo: Early exit via reinforcement learning in reasoning models. arXiv preprint arXiv:2505.07686. +Tom Everitt, Victoria Krakovna, Laurent Orseau, Marcus Hutter, and Shane Legg. 2017. Reinforcement learning with a corrupted reward channel. arXiv preprint arXiv:1705.08417. +Simon Frieder, Sam Bealing, Armenii Nikolaiev, Geoff C. Smith, Kevin Buzzard, Timothy Gowers, Peter J. Liu, Po-Shen Loh, Lester Mackey, Leonardo de Moura, Dan Roberts, D. Sculley, Terence Tao, David Balduzzi, Simon Coyle, Alex Gerko, Ryan Holbrook, Addison Howard, and XTX Markets. 2024. AI mathematical olympiad - progress prize 2. https://kaggle.com/competitions/ai-mathematical-olympiad-progress-prize-2. Kaggle. +Leo Gao, John Schulman, and Jacob Hilton. 2023. Scaling laws for reward model overoptimization. In International Conference on Machine Learning, pages 10835-10866. PMLR. +Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, and 1 others. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948. +Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. 2025a. Open reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290. +Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. 2025b. Open-reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290. +Xuying Li, Zhuo Li, Yuji Kosuga, and Victor Bian. 2025.
Optimizing safe and aligned language generation: A multi-objective grpo approach. arXiv preprint arXiv:2503.21819. +Jie Liu, Gongye Liu, Jiajun Liang, Yangguang Li, Jiaheng Liu, Xintao Wang, Pengfei Wan, Di Zhang, and Wanli Ouyang. 2025a. Flow-grpo: Training flow matching models via online rl. arXiv preprint arXiv:2505.05470. + +Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. 2025b. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783. +Michael Luo, Sijun Tan, Roy Huang, Ameen Patel, Alpay Ariyak, Qingyang Wu, Xiaoxiang Shi, Rachel Xin, Colin Cai, Maurice Weber, Ce Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. 2025a. Deepcoder: A fully open-source 14b coder at o3-mini level. Notion Blog. +Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. 2025b. Deepscaler: Surpassing o1-preview with a 1.5b model by scaling rl. Notion Blog. +Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741. +John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347. +Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, and 1 others. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300. +Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. 2024. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv:2409.19256. +Zhiqing Sun. 2024. Aimo-cmu/math/cmu/math-aimo. +Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, and 1 others. 2025. Kimi k1.5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599. +Qwen Team. 2025. Qwq-32b: Embracing the power of reinforcement learning. +Liang Wen, Yunke Cai, Fenrui Xiao, Xin He, Qi An, Zhenyu Duan, Yimin Du, Junchen Liu, Lifu Tang, Xiaowei Lv, and 1 others. 2025. Light-r1: Curriculum sft, dpo and r1 for long cot from scratch and beyond. arXiv preprint arXiv:2503.10460. +Lilian Weng. 2024. Reward hacking in reinforcement learning. lilianweng.github.io. + +An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, and 1 others. 2024. Qwen2.5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122. +Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373. + +# A Evaluations on Coding Tasks + +We evaluate our proposed LEAD-14B model against the original DeepSeek-R1-Distill-Qwen-14B baseline on the LiveCodeBench benchmark under a maximum sequence length of 8k tokens. The dataset version used is release_v5, consisting of 880 code generation tasks. Results are summarized in Table 3. + +As shown in Table 3, LEAD-14B achieves higher accuracy (0.5156 vs. 0.5103) while producing slightly longer completions.
This suggests that our method enhances reasoning capability in code generation. Regarding the observed increase in chain-of-thought (CoT) length, we hypothesize that this effect arises because our training focused exclusively on mathematical reasoning datasets. While our method compresses reasoning paths in math domains, such compression does not appear to generalize as effectively to code. Combined with the improved reasoning capability, which may lengthen the overall reasoning path, this may explain why generated sequences are longer on code-related tasks. + +# B Detailed Analysis on AIME25 by Difficulty + +To further analyze model performance, we stratified the AIME25 dataset into three difficulty levels based on the problem number: normal (problems 1-5), difficult (problems 6-10), and highly difficult (problems 11-15). The detailed evaluation results for each stratum are presented in Table 4. + +The stratified results in Table 4 support the hypothesis that advantage reweighting enhances a model's ability to solve more difficult problems. This is evidenced by the widening performance gap in Pass@1 between GRPO+L and GRPO+LAD as problem difficulty increases. For normal problems, GRPO+LAD offers a modest $1.95\%$ improvement over GRPO+L. This margin increases substantially to $13.7\%$ for difficult problems, indicating that the benefits of advantage reweighting are more pronounced in challenging scenarios. + +For highly difficult problems, the Pass@1 scores for GRPO+L and GRPO+LAD are identical. Neither method incorporates an explicit penalty for incorrect answers, making them susceptible to generating numerous wrong solutions. This tendency leads to unstable majority voting-based accuracy (Cons@32), a vulnerability that is magnified by the intrinsic difficulty of the problems. + +In contrast, the introduction of an explicit penalty in GRPO+LEAD demonstrates a clear regularization effect. On the most difficult problem set, GRPO+LEAD achieves the highest accuracy (Cons@32 of 0.4) and more than doubles the precision of both GRPO+L (0.172) and GRPO+LAD (0.156); the number of correct answers generated by GRPO+LEAD is comparable to both GRPO+L and GRPO+LAD, despite generating far fewer total answers. This validates our hypothesis that the explicit penalty effectively "regularizes thinking", discouraging the kind of hasty and incorrect responses that the length reward tends to encourage otherwise. + +# C Qualitative Analysis of Solution Conciseness + +To provide a qualitative illustration of how the length reward enhances conciseness, we contrast the shortest correct solutions generated by GRPO+L and the baseline Deepseek-7B for the same problem (Problem 3, AIME 25 I). Table 5 breaks down the comparison across key aspects of readability and reasoning structure. + +As the comparison highlights, the GRPO+L model produces a tight, step-by-step solution that remains focused, avoids repetition, and concludes efficiently. In contrast, the Deepseek-7B baseline's reasoning path is less direct, characterized by repeated self-checks and conversational digressions that nearly double the total length and reduce clarity. This case study demonstrates that our length-reward mechanism successfully encourages a more disciplined and economical reasoning style. + +
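The repetitive patterns visible in the baseline column of Table 5 are the kind of behavior that the Stage 2 repetition penalty in Section 4.3 targets (a negative reward of $-1.5$ for repeated $n$-grams). A minimal sketch of such a check is shown below; the $n$-gram size, threshold, and function names are illustrative assumptions, not the paper's implementation.

```python
def has_repeated_ngram(tokens, n=8, max_repeats=1):
    """Return True if any n-gram of `tokens` occurs more than `max_repeats`
    times; such rollouts would receive the negative repetition reward
    instead of the usual accuracy-based reward."""
    counts = {}
    for i in range(len(tokens) - n + 1):
        gram = tuple(tokens[i:i + n])
        counts[gram] = counts.get(gram, 0) + 1
        if counts[gram] > max_repeats:
            return True
    return False

def repetition_penalty_reward(tokens, base_reward, penalty=-1.5):
    """Illustrative wrapper: override the reward with the repetition
    penalty when the rollout repeats itself."""
    return penalty if has_repeated_ngram(tokens) else base_reward

# Hypothetical rollout that repeats the same 8-token phrase twice.
phrase = "so i think 16 is the answer .".split()
rollout = "the remainder is 16 .".split() + phrase + phrase
print(repetition_penalty_reward(rollout, base_reward=1.0))  # -1.5
```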
| Model | Accuracy | Avg. Tokens (Overall) | Easy | Medium | Hard |
| --- | --- | --- | --- | --- | --- |
| LEAD-14B | 0.5156 | 6322 | 3998 | 6912 | 8000 |
| DeepSeek-R1-Distill-Qwen-14B | 0.5103 | 5794 | 3046 | 6429 | 7856 |
+ +Table 3: Performance on LiveCodeBench (release_v5) with maximum sequence length of 8k tokens. All token counts are rounded to the nearest integer. + +
| Model | Cons@32 | Avg. Correct | Avg. Answer | Precision | Pass@1 |
| --- | --- | --- | --- | --- | --- |
| Normal Problems (1–5) | | | | | |
| Deepseek-7B | 0.8 | 18.8 | 20.3 | 0.708 | 0.588 |
| GRPO + L | 0.8 | 19.7 | 27.6 | 0.631 | 0.616 |
| GRPO + LAD | 0.9 | 20.1 | 26.9 | 0.687 | 0.628 |
| GRPO + LEAD | 0.8 | 22.0 | 24.5 | 0.723 | 0.688 |
| Difficult Problems (6–10) | | | | | |
| Deepseek-7B | 0.4 | 8.3 | 13.8 | 0.404 | 0.259 |
| GRPO + L | 0.5 | 8.6 | 24.1 | 0.412 | 0.269 |
| GRPO + LAD | 0.6 | 9.8 | 24.2 | 0.448 | 0.306 |
| GRPO + LEAD | 0.6 | 9.7 | 20.0 | 0.421 | 0.303 |
| Highly Difficult Problems (11–15) | | | | | |
| Deepseek-7B | 0.2 | 0.9 | 2.0 | 0.230 | 0.028 |
| GRPO + L | 0.3 | 1.3 | 13.9 | 0.172 | 0.041 |
| GRPO + LAD | 0.2 | 1.3 | 14.6 | 0.156 | 0.041 |
| GRPO + LEAD | 0.4 | 1.5 | 7.7 | 0.355 | 0.047 |
+ +Table 4: Consolidated evaluation results on AIME25, stratified by problem difficulty. Avg. Answer refers to the number of outputs that complete within the 8k token budget and produce a final answer, whereas Avg. Correct refers to the number of those answers that are correct. Precision is Avg. Correct / Avg. Answer. + +
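As a small worked check of how the columns in Table 4 relate, Pass@1 for a stratum equals Avg. Correct divided by the 32 samples drawn per question (consistent with the definition of Pass@1 in Section 4.4). The snippet below recomputes two entries; the function name is illustrative only.

```python
def stratum_pass_at_1(avg_correct, num_samples=32):
    """Pass@1 for a stratum: average number of correct answers per
    problem divided by the 32 samples generated per problem."""
    return avg_correct / num_samples

# Values taken from the Avg. Correct column of Table 4.
print(round(stratum_pass_at_1(22.0), 3))  # GRPO + LEAD, normal problems -> 0.688
print(round(stratum_pass_at_1(1.5), 3))   # GRPO + LEAD, highly difficult -> 0.047
```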
| Aspect | GRPO+L | Deepseek-7B |
| --- | --- | --- |
| Structure & flow | "Step 1: enumerate all possible triples ... Step 2: compute the multinomial coefficient ... Step 3: sum and mod." | "Okay ... let me parse this step by step ... but wait, hold on ... let me verify the triples again ..." |
| Redundancy | "...hence, all possible triples: (6, 2, 1), (5, 3, 1), (4, 3, 2)." | "So, the possible triples ... So, three triples in total ... Wait, hold on, let me check if there are more ... So, total three triples." |
| Conciseness of language | "Total N = 2016. Therefore, the remainder is 16." | "Wait, hold on a second. ... Maybe I can think of all possible partitions ... No, I think the only possible triples are the three we found." |
| Logical signposting | "Case 1: s = 1 ... Case 2: s = 2 ... Case 3: s = 3 (no solutions)." | "Case 1: S = 1 ... Subcase 1a ... Subcase 1b ... (digression) ... Case 3: S = 3 ... no solutions ... (returns to earlier cases)." |
| Error-checking | "Only three possible triples, so the computation is complete." | "Wait, hold on a second. Is that all? ... let me verify the triples again ... maybe there are other triples?" |
| Length | Entire solution ≈ 200 words. | Entire solution ≈ 370 words (many repeated sentences such as "So, I think 16 is the answer"). |
+ +Table 5: Qualitative comparison of the shortest correct rollouts from GRPO+L and Deepseek-7B for AIME 25 I, Problem 3. Italicized text in the Deepseek-7B column represents meta-commentary or self-correction loops. \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09696/images/16c7a276b19b5bd18129c5cf39ce6ea82c941e083f61efcbc174c878634d8a02.jpg b/data/2025/2504_09xxx/2504.09696/images/16c7a276b19b5bd18129c5cf39ce6ea82c941e083f61efcbc174c878634d8a02.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1862814258a821c56ab9b50854dff3475e83d9e9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/16c7a276b19b5bd18129c5cf39ce6ea82c941e083f61efcbc174c878634d8a02.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:07a98fd147747aac8ca6db0a7cb19aa70789ce85c737c6475779ad6774d862c0 +size 9527 diff --git a/data/2025/2504_09xxx/2504.09696/images/1f9e243b080e8803874abb0d7a3116c00508f277e09d4ff8f36c76efb04e4978.jpg b/data/2025/2504_09xxx/2504.09696/images/1f9e243b080e8803874abb0d7a3116c00508f277e09d4ff8f36c76efb04e4978.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8c82011360c83b3b57fb25a255126eb74dd5b7db --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/1f9e243b080e8803874abb0d7a3116c00508f277e09d4ff8f36c76efb04e4978.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:408308132966d158869827c2e8fe8663bd469c6fff8a79660f1881510befe405 +size 11248 diff --git a/data/2025/2504_09xxx/2504.09696/images/1fb4da8497ab06226005c02b6f0912d47c012f78d52a6a63a29cd98eae58f115.jpg b/data/2025/2504_09xxx/2504.09696/images/1fb4da8497ab06226005c02b6f0912d47c012f78d52a6a63a29cd98eae58f115.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25558e44c6ed688d54e0bf8b6e942e79c4d1804c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/1fb4da8497ab06226005c02b6f0912d47c012f78d52a6a63a29cd98eae58f115.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4dddb3b72691ac12fd3ecde26b54d379870aeb135849083d1a8d78bb14b9e281 +size 7522 diff --git a/data/2025/2504_09xxx/2504.09696/images/274094583e67753086af142c74b384929a0aa9ee769873e28522d71ca15deeb9.jpg b/data/2025/2504_09xxx/2504.09696/images/274094583e67753086af142c74b384929a0aa9ee769873e28522d71ca15deeb9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ab7d3a571a5c44fcb6c08c76bbe29111b9189cb8 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/274094583e67753086af142c74b384929a0aa9ee769873e28522d71ca15deeb9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:49c8ac5689d08d532c94160d769b88fcd1782d5c60ada8f3244476dc03112101 +size 9221 diff --git a/data/2025/2504_09xxx/2504.09696/images/35c0b48f224a10f3ce125cb8d89a0d94f7d88abe9247cffe859c99119ae86f8b.jpg b/data/2025/2504_09xxx/2504.09696/images/35c0b48f224a10f3ce125cb8d89a0d94f7d88abe9247cffe859c99119ae86f8b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ff6deb1bdced92703e1612cd89309f6af702df73 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/35c0b48f224a10f3ce125cb8d89a0d94f7d88abe9247cffe859c99119ae86f8b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2dd7ac230881bd0fe42086d4adbad7625c55d1e1f92a4b76da916d3c0da33884 +size 4342 diff --git a/data/2025/2504_09xxx/2504.09696/images/3bf3393ea5c329e569063351f4b214385e62b4c643fed7c2d96fd2f2ce567634.jpg 
b/data/2025/2504_09xxx/2504.09696/images/3bf3393ea5c329e569063351f4b214385e62b4c643fed7c2d96fd2f2ce567634.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54dfd69ad617dfd495e9155b81c508a9017cb84c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/3bf3393ea5c329e569063351f4b214385e62b4c643fed7c2d96fd2f2ce567634.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5c62767612d55df0b3cdbe416ae6ce069f4c5202d039b09164be79c8e2469647 +size 3205 diff --git a/data/2025/2504_09xxx/2504.09696/images/3ee3108d62115e6f66bfdf1f24274cd18a97afe31f29e4ed99600807b8330225.jpg b/data/2025/2504_09xxx/2504.09696/images/3ee3108d62115e6f66bfdf1f24274cd18a97afe31f29e4ed99600807b8330225.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c26f04f858d48a48f7c29dd1858490259772c2d3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/3ee3108d62115e6f66bfdf1f24274cd18a97afe31f29e4ed99600807b8330225.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b68af0b0753f8326f7e33ac15e6dc4dafe5650aee6704a4e3a95f26a2195f830 +size 7195 diff --git a/data/2025/2504_09xxx/2504.09696/images/3fa5200369db7547f78269a6c2c7425fa2544a50a17f62152b292e4193de8175.jpg b/data/2025/2504_09xxx/2504.09696/images/3fa5200369db7547f78269a6c2c7425fa2544a50a17f62152b292e4193de8175.jpg new file mode 100644 index 0000000000000000000000000000000000000000..52a2eb228e3f7494fecc56cfe317eb9a711fea7c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/3fa5200369db7547f78269a6c2c7425fa2544a50a17f62152b292e4193de8175.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6c18b28ee6c531dbe9a982372c91835b4b0902b3d6afe402b8c458b2967c9192 +size 9577 diff --git a/data/2025/2504_09xxx/2504.09696/images/478b1b6e2f5432a216d7f533b5b55e9c849b458bb79fec9b9d88a6449e47522a.jpg b/data/2025/2504_09xxx/2504.09696/images/478b1b6e2f5432a216d7f533b5b55e9c849b458bb79fec9b9d88a6449e47522a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..caca42eae3110fccc5292c95195582086f004286 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/478b1b6e2f5432a216d7f533b5b55e9c849b458bb79fec9b9d88a6449e47522a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc8fef560cad85af7869a133b835ac4f8dd51d27da4bb9b319db8f3c6c9f9f17 +size 108158 diff --git a/data/2025/2504_09xxx/2504.09696/images/49bd1596981623fadfe154c0083f9a46d8c8b90fb2249bc285484b80b8a37d35.jpg b/data/2025/2504_09xxx/2504.09696/images/49bd1596981623fadfe154c0083f9a46d8c8b90fb2249bc285484b80b8a37d35.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5ad9912695b18a07014f943301688d0367cd5f28 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/49bd1596981623fadfe154c0083f9a46d8c8b90fb2249bc285484b80b8a37d35.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:089650ab7b4a033ae70dddf7685de02a6d53ba6aa9555535b50056269f750f51 +size 13807 diff --git a/data/2025/2504_09xxx/2504.09696/images/59333a80607e2d683bc530ea9c4bd43634e70dd8669f65a5c324f3f4a2e39e18.jpg b/data/2025/2504_09xxx/2504.09696/images/59333a80607e2d683bc530ea9c4bd43634e70dd8669f65a5c324f3f4a2e39e18.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f43e3877b211be2d3feba35a2c249add9404a1fb --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/59333a80607e2d683bc530ea9c4bd43634e70dd8669f65a5c324f3f4a2e39e18.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:4cf926c64b94757f5fe733e6d1624388781fd11dc82a7d1a47f12d7e4f4c744e +size 28796 diff --git a/data/2025/2504_09xxx/2504.09696/images/7b4de12b34c63c91c7c21a3c5c02c39504eef03e7f0ba951fbb09f305c5ccb42.jpg b/data/2025/2504_09xxx/2504.09696/images/7b4de12b34c63c91c7c21a3c5c02c39504eef03e7f0ba951fbb09f305c5ccb42.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a33bf706ff9728d96afcbe83d62211e8f87d5186 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/7b4de12b34c63c91c7c21a3c5c02c39504eef03e7f0ba951fbb09f305c5ccb42.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fe04e9299879a6d0e409c6cbaba6faebcc2a1e1d39def9df717fe586a963fb7 +size 4806 diff --git a/data/2025/2504_09xxx/2504.09696/images/8e247d0c876304cde98fffbdce30b4e7b15bb0ca2c04c96aaa8a2c38f227604e.jpg b/data/2025/2504_09xxx/2504.09696/images/8e247d0c876304cde98fffbdce30b4e7b15bb0ca2c04c96aaa8a2c38f227604e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9426c4fec15487ec2704e036a615962b2cd07a26 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/8e247d0c876304cde98fffbdce30b4e7b15bb0ca2c04c96aaa8a2c38f227604e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fff29e02031576b11a6a23bc6bd2ae7e0a9ccbba3c00c7059b1d176b9204e6b9 +size 32333 diff --git a/data/2025/2504_09xxx/2504.09696/images/9230674a58fba87161ad7c0744bfa24540aa98dea15fe70898e188356acfe870.jpg b/data/2025/2504_09xxx/2504.09696/images/9230674a58fba87161ad7c0744bfa24540aa98dea15fe70898e188356acfe870.jpg new file mode 100644 index 0000000000000000000000000000000000000000..363a5a4b36934b3917c5647864950af6d37dc5a6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/9230674a58fba87161ad7c0744bfa24540aa98dea15fe70898e188356acfe870.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:58490de56e51246493a5497cbdc0c72befbc5e6a45d58c6f8199b0373861de91 +size 42611 diff --git a/data/2025/2504_09xxx/2504.09696/images/9897cdeb9ea0239d5c8fe13664f290f1ecf099b758915f70595281e0aa979e0d.jpg b/data/2025/2504_09xxx/2504.09696/images/9897cdeb9ea0239d5c8fe13664f290f1ecf099b758915f70595281e0aa979e0d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3fc1295016ba17554f2f2c1df79aed9c42078be --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/9897cdeb9ea0239d5c8fe13664f290f1ecf099b758915f70595281e0aa979e0d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b53e07d7a5d353e3883b5adcc5d0cef0071fe284ba204cecad2392bf0fc17a44 +size 103731 diff --git a/data/2025/2504_09xxx/2504.09696/images/bb21394a055c4e6334099714496cb564eb5a23dcb6c328d6057f393534a6b2f9.jpg b/data/2025/2504_09xxx/2504.09696/images/bb21394a055c4e6334099714496cb564eb5a23dcb6c328d6057f393534a6b2f9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a2b4725903dd6646661d7c6af2e2a5b7f37f1bc6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/bb21394a055c4e6334099714496cb564eb5a23dcb6c328d6057f393534a6b2f9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e214bf1b5839bc356ecbe0533359fbc25239e7cc2b12ffff10290e9a7a2434ab +size 126277 diff --git a/data/2025/2504_09xxx/2504.09696/images/cb27064bad9d7d7aad0c36e8295a8fde63547fc8b426989787b2f2ac1bb59271.jpg b/data/2025/2504_09xxx/2504.09696/images/cb27064bad9d7d7aad0c36e8295a8fde63547fc8b426989787b2f2ac1bb59271.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8a15163962623d01d8292126f9c78f9714774e80 --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09696/images/cb27064bad9d7d7aad0c36e8295a8fde63547fc8b426989787b2f2ac1bb59271.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1c5d73e2311bfe4e729cd4e25b059a56ab70fe966fd0eaddfacc1338e5858e42 +size 40718 diff --git a/data/2025/2504_09xxx/2504.09696/images/ecf1585c4fefdee7569f5058092eb3664780d7e731db04902c9053171784375e.jpg b/data/2025/2504_09xxx/2504.09696/images/ecf1585c4fefdee7569f5058092eb3664780d7e731db04902c9053171784375e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..699ec9196104e515b2f3da361486538a0bca41e9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/ecf1585c4fefdee7569f5058092eb3664780d7e731db04902c9053171784375e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d682c1f8196f07ba58653e6c0da12c5fd000b610208bbf9adeb0c32b821955cd +size 6284 diff --git a/data/2025/2504_09xxx/2504.09696/images/f58a619ee4c24ad833dcde93315c5269472b57b7d1effc226bf1d6b1b8ec1c8e.jpg b/data/2025/2504_09xxx/2504.09696/images/f58a619ee4c24ad833dcde93315c5269472b57b7d1effc226bf1d6b1b8ec1c8e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a092a0f531740a028bec12eeeb392c62c4c55847 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/f58a619ee4c24ad833dcde93315c5269472b57b7d1effc226bf1d6b1b8ec1c8e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c8be724a21ebd048c2270145f204fc0eba1af585c59116fb1634043241fb6c6 +size 37307 diff --git a/data/2025/2504_09xxx/2504.09696/images/f6114c3dc99b278ea5e4df33567114e0f0169d5067babb21babc251663674c47.jpg b/data/2025/2504_09xxx/2504.09696/images/f6114c3dc99b278ea5e4df33567114e0f0169d5067babb21babc251663674c47.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a51fc7cfbb0a5251c60560b07a93cdb4801a7668 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/images/f6114c3dc99b278ea5e4df33567114e0f0169d5067babb21babc251663674c47.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5044f34281c1dc83fd791aaf4126db61e7e6041f10967eb5e3834d66f6284f58 +size 40883 diff --git a/data/2025/2504_09xxx/2504.09696/layout.json b/data/2025/2504_09xxx/2504.09696/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..b26e8d26534aa0cb8e3d0ceae91b0d6dbae58d74 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09696/layout.json @@ -0,0 +1,7359 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 69, + 75, + 526, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 75, + 526, + 110 + ], + "spans": [ + { + "bbox": [ + 69, + 75, + 526, + 110 + ], + "type": "text", + "content": "GRPO-LEAD: A Difficulty-Aware Reinforcement Learning Approach for Concise Mathematical Reasoning in Language Models" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 133, + 133, + 260, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 133, + 260, + 175 + ], + "spans": [ + { + "bbox": [ + 133, + 133, + 260, + 175 + ], + "type": "text", + "content": "Jixiao Zhang* Johns Hopkins University jzhan432@jh.edu" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 333, + 133, + 459, + 174 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 133, + 459, + 174 + ], + "spans": [ + { + "bbox": [ + 333, + 133, + 459, + 174 + ], + "type": "text", + "content": "Chunsheng Zuo* Johns Hopkins University czuo3@jh.edu" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 238, + 274, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 238, + 274, + 501 + ], + "spans": [ + { + "bbox": [ + 85, + 238, + 274, + 501 + ], + "type": "text", + "content": "Group Relative Policy Optimization (GRPO), which is widely adopted by R1-like reasoning models, has advanced mathematical reasoning. Nevertheless, GRPO faces challenges in reward sparsity, verosity, and inadequate focus on problem difficulty. We propose GRPO-LEAD, enhancing GRPO with: (1) length-regularized rewards to encourage conciseness while maintaining accuracy; (2) explicit penalties for incorrect solutions to improve model precision; and (3) difficulty-aware advantage reweighting for robust generalization on challenging problems. Comprehensive evaluations demonstrate that GRPO-LEAD significantly improves reasoning accuracy, conciseness, and efficiency. Our approach achieves state-of-the-art performance for 14B-scale models, underscoring the synergy of our methods with appropriate model scale and high-quality data. Our source code, generated dataset, and models are available at https://github.com/aeroplanepaper/GRPO-LEAD." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 507, + 155, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 507, + 155, + 520 + ], + "spans": [ + { + "bbox": [ + 68, + 507, + 155, + 520 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 528, + 291, + 758 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 528, + 291, + 758 + ], + "spans": [ + { + "bbox": [ + 67, + 528, + 291, + 758 + ], + "type": "text", + "content": "Recently, R1-like reasoning models have attracted significant attention due to their impressive performance in solving challenging mathematical reasoning tasks through extensive chains of thought (Luo et al., 2025b; Wen et al., 2025). According to the technical report introducing R1 (Guo et al., 2025), reinforcement learning (RL) fine-tuning plays a pivotal role in enabling this reasoning capability. In particular, Group Relative Policy Optimization (GRPO) (Shao et al., 2024), a novel RL approach for language models, has emerged as a promising alternative to traditional methods such as PPO (Schulman et al., 2017) and DPO (Rafailov et al., 2023), primarily due to its efficiency and intrinsic compatibility with language model training. Researchers across various domains have successfully employed GRPO (Li et al., 2025; Liu et al.," + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 219, + 526, + 247 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 219, + 526, + 247 + ], + "spans": [ + { + "bbox": [ + 302, + 219, + 526, + 247 + ], + "type": "text", + "content": "2025a; Luo et al., 2025a; Dai et al., 2025), achieving impressive outcomes." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 251, + 526, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 251, + 526, + 535 + ], + "spans": [ + { + "bbox": [ + 302, + 251, + 526, + 535 + ], + "type": "text", + "content": "Despite its strengths, existing GRPO implementations encounter significant limitations. 
A primary issue is reward sparsity stemming from binary, rule-based accuracy metrics; when responses within problem groups exhibit uniform correctness or incorrectness, the resulting uniform reward signals offer minimal differentiation, weakening learning gradients and hampering convergence. Moreover, such uniform signals inadequately promote concise reasoning, leading to unnecessarily verbose outputs and inefficiencies during training and inference. Additionally, the current reward formulation lacks explicit penalties for incorrect answers (Hu et al., 2025a; Luo et al., 2025b; Chu et al., 2025), inadvertently encouraging models to guess rather than engage in rigorous reasoning, thereby compromising precision. Furthermore, rewards are applied uniformly across problems regardless of their intrinsic difficulty, causing models to excessively optimize simpler tasks while neglecting more challenging problems that require deeper reasoning." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 540, + 527, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 540, + 527, + 690 + ], + "spans": [ + { + "bbox": [ + 302, + 540, + 527, + 690 + ], + "type": "text", + "content": "Furthermore, computational efficiency also emerges as a critical practical concern, as reinforcement learning fine-tuning typically demands substantial resources, limiting accessibility, experimentation speed, and scalability, especially in low-resource environments. The current GRPO formulation is insufficient for encouraging concise and precise reasoning. Consequently, reducing computational requirements during both training and inference is essential for enabling broader applicability and effective real-world deployment." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 693, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 693, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 693, + 526, + 775 + ], + "type": "text", + "content": "Motivated by these limitations, this work introduces GRPO-LEAD, a suite of targeted modifications explicitly designed to enhance GRPO's effectiveness for mathematical reasoning tasks. The overall framework is illustrated in Figure 1. Our key contributions include:" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 13, + 244, + 38, + 594 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 13, + 244, + 38, + 594 + ], + "spans": [ + { + "bbox": [ + 13, + 244, + 38, + 594 + ], + "type": "text", + "content": "arXiv:2504.09696v2 [cs.CL] 19 Sep 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 81, + 762, + 156, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 762, + 156, + 775 + ], + "spans": [ + { + "bbox": [ + 81, + 762, + 156, + 775 + ], + "type": "text", + "content": "*Equal contribution." 
+ } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 82, + 74, + 512, + 275 + ], + "blocks": [ + { + "bbox": [ + 82, + 74, + 512, + 275 + ], + "lines": [ + { + "bbox": [ + 82, + 74, + 512, + 275 + ], + "spans": [ + { + "bbox": [ + 82, + 74, + 512, + 275 + ], + "type": "image", + "image_path": "9897cdeb9ea0239d5c8fe13664f290f1ecf099b758915f70595281e0aa979e0d.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 66, + 288, + 525, + 338 + ], + "lines": [ + { + "bbox": [ + 66, + 288, + 525, + 338 + ], + "spans": [ + { + "bbox": [ + 66, + 288, + 525, + 338 + ], + "type": "text", + "content": "Figure 1: The GRPO-LEAD framework assigns length-regularized positive rewards to correct answers and explicit penalties to incorrect ones. A difficulty-based weight " + }, + { + "bbox": [ + 66, + 288, + 525, + 338 + ], + "type": "inline_equation", + "content": "w" + }, + { + "bbox": [ + 66, + 288, + 525, + 338 + ], + "type": "text", + "content": " used for advantage reweighting is determined from the empirical correctness of responses for each question. This weight then scales the advantages derived from each question, prioritizing harder questions over easier ones during the policy update to foster robust reasoning." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 80, + 358, + 291, + 523 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 80, + 358, + 290, + 412 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 358, + 290, + 412 + ], + "spans": [ + { + "bbox": [ + 80, + 358, + 290, + 412 + ], + "type": "text", + "content": "- We introduce a length-regularized reward with an explicit penalty for incorrect solutions to encourage solution conciseness while maintaining accuracy." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 80, + 413, + 290, + 454 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 413, + 290, + 454 + ], + "spans": [ + { + "bbox": [ + 80, + 413, + 290, + 454 + ], + "type": "text", + "content": "- We apply difficulty-aware advantage reweighting to focus learning on more challenging problems, fostering robust generalization." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 80, + 455, + 291, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 455, + 291, + 523 + ], + "spans": [ + { + "bbox": [ + 80, + 455, + 291, + 523 + ], + "type": "text", + "content": "- Our comprehensive evaluations demonstrate GRPO-LEAD significantly improves reasoning accuracy and conciseness, achieving state-of-the-art performance in mathematical reasoning for 14B-scale models." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 532, + 161, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 532, + 161, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 532, + 161, + 544 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 553, + 265, + 565 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 553, + 265, + 565 + ], + "spans": [ + { + "bbox": [ + 67, + 553, + 265, + 565 + ], + "type": "text", + "content": "2.1 Group Relative Policy Optimization" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 570, + 290, + 691 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 570, + 290, + 691 + ], + "spans": [ + { + "bbox": [ + 67, + 570, + 290, + 691 + ], + "type": "text", + "content": "Group Relative Policy Optimization (GRPO) is a recently proposed algorithm designed specifically for fine-tuning language models with group-level normalization of rewards (Guo et al., 2025). GRPO modifies the standard policy gradient objective by introducing relative advantages within sets of responses corresponding to the same query, stabilizing updates and promoting consistent learning signals. Formally, GRPO defines the objective as:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 73, + 697, + 289, + 773 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 697, + 289, + 773 + ], + "spans": [ + { + "bbox": [ + 73, + 697, + 289, + 773 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\mathrm {G R P O}} (\\theta) = \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\frac {1}{| o _ {i} |} \\sum_ {t = 1} ^ {| o _ {i} |} \\left[ \\min \\left(r _ {i, t} (\\theta) \\hat {A} _ {i, t}, \\right. \\right. \\tag {1} \\\\ \\left. \\operatorname {c l i p} \\left(r _ {i, t} (\\theta), 1 - \\epsilon , 1 + \\epsilon) \\hat {A} _ {i, t}\\right) \\right] \\\\ \\end{array}", + "image_path": "49bd1596981623fadfe154c0083f9a46d8c8b90fb2249bc285484b80b8a37d35.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 358, + 516, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 358, + 516, + 371 + ], + "spans": [ + { + "bbox": [ + 302, + 358, + 516, + 371 + ], + "type": "text", + "content": "where the importance sampling ratio is given by" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 347, + 377, + 525, + 407 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 377, + 525, + 407 + ], + "spans": [ + { + "bbox": [ + 347, + 377, + 525, + 407 + ], + "type": "interline_equation", + "content": "r _ {i, t} (\\theta) = \\frac {\\pi_ {\\theta} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}{\\pi_ {\\theta_ {\\mathrm {o l d}}} \\left(o _ {i , t} \\mid q , o _ {i , < t}\\right)}. 
\\tag {2}", + "image_path": "3ee3108d62115e6f66bfdf1f24274cd18a97afe31f29e4ed99600807b8330225.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "spans": [ + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "text", + "content": "Here, " + }, + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "text", + "content": " denotes the number of groups (e.g., different queries), " + }, + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "inline_equation", + "content": "\\hat{A}_{i,t}" + }, + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "text", + "content": " is the normalized advantage within group " + }, + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 302, + 411, + 525, + 465 + ], + "type": "text", + "content": " defines the clipping range for conservative updates." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 473, + 405, + 486 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 473, + 405, + 486 + ], + "spans": [ + { + "bbox": [ + 302, + 473, + 405, + 486 + ], + "type": "text", + "content": "2.2 Length Reward" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 301, + 491, + 525, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 491, + 525, + 652 + ], + "spans": [ + { + "bbox": [ + 301, + 491, + 525, + 652 + ], + "type": "text", + "content": "A prevalent issue in reinforcement learning-based fine-tuning of language models is reward hacking (Everitt et al., 2017; Gao et al., 2023; Weng, 2024). In GRPO, when the model is trained with a large fixed budget, it can exploit this budget by producing an excessive number of extra reasoning and verification steps to ensure the correctness of the answer and therefore reach a higher reward. This phenomenon leads to unnecessarily verbose responses that lack conciseness and hinder interpretability, resulting in inefficiency in reasoning and reducing the model's practicality." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 653, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 653, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 653, + 526, + 775 + ], + "type": "text", + "content": "Existing efforts to mitigate this problem typically involve incentivizing shorter answers to encourage more succinct reasoning processes. For example, Kimi proposed an individual min-max normalized length reward based on the lengths of generated responses (Team et al., 2025). Yeo et al. introduced a cosine length reward function with fixed maximum and minimum thresholds to manage response lengths (Yeo et al., 2025). Aggarwal et al." 
+ } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 289, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 289, + 111 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 289, + 111 + ], + "type": "text", + "content": "utilized a target \"golden length\" to directly reward or penalize responses based on their deviation from an ideal length (Aggarwal and Welleck, 2025)." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 112, + 289, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 112, + 289, + 232 + ], + "spans": [ + { + "bbox": [ + 67, + 112, + 289, + 232 + ], + "type": "text", + "content": "However, these existing methods depend heavily on static or predefined length heuristics, limiting their effectiveness across diverse questions of varying complexity. In contrast, our proposed length-dependent accuracy reward addresses these limitations by dynamically calibrating rewards according to each group's relative response length and rollout accuracy, promoting concise yet difficulty-aware reasoning processes." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 244, + 129, + 257 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 244, + 129, + 257 + ], + "spans": [ + { + "bbox": [ + 67, + 244, + 129, + 257 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 266, + 291, + 497 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 266, + 291, + 497 + ], + "spans": [ + { + "bbox": [ + 67, + 266, + 291, + 497 + ], + "type": "text", + "content": "To systematically address the limitations identified in existing implementations of Group Relative Policy Optimization (GRPO), we propose a suite of novel modifications collectively termed GRPO-LEAD (GRPO with Length-dependent rewards, Explicit penalties, and Advantage reweighting for Difficulty). Our proposed method enhances the original GRPO framework by introducing three core innovations: 1) a length-dependent accuracy reward to foster concise solutions, 2) an explicit penalty mechanism to mitigate low precision rate caused by length reward, and 3) a difficulty-aware advantage reweighting strategy that amplifies learning signals for challenging problems. Additionally, we examine how base model scale and supervised fine-tuning (SFT) impact the effectiveness of reinforcement learning (RL) fine-tuning." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 507, + 269, + 520 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 507, + 269, + 520 + ], + "spans": [ + { + "bbox": [ + 67, + 507, + 269, + 520 + ], + "type": "text", + "content": "3.1 Length-Dependent Accuracy Reward" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "spans": [ + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "text", + "content": "The core idea is to reward correct completions not uniformly but in proportion to their relative conciseness. 
Given a question " + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "text", + "content": " and a set of model-generated responses " + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "inline_equation", + "content": "\\{o_i\\}" + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "text", + "content": ", we first isolate the subset of correct responses and compute the mean " + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "inline_equation", + "content": "\\mu" + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "text", + "content": " and standard deviation " + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "inline_equation", + "content": "\\sigma" + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "text", + "content": " of their token lengths. For a correct response " + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "inline_equation", + "content": "o" + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "text", + "content": " with length " + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "inline_equation", + "content": "|o|" + }, + { + "bbox": [ + 67, + 524, + 290, + 632 + ], + "type": "text", + "content": ", we define its standardized length deviation as:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 149, + 643, + 290, + 670 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 643, + 290, + 670 + ], + "spans": [ + { + "bbox": [ + 149, + 643, + 290, + 670 + ], + "type": "interline_equation", + "content": "z = \\frac {| o | - \\mu}{\\sigma + \\epsilon}, \\tag {3}", + "image_path": "3bf3393ea5c329e569063351f4b214385e62b4c643fed7c2d96fd2f2ce567634.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 678, + 289, + 719 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 678, + 289, + 719 + ], + "spans": [ + { + "bbox": [ + 67, + 678, + 289, + 719 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 678, + 289, + 719 + ], + "type": "inline_equation", + "content": "\\epsilon > 0" + }, + { + "bbox": [ + 67, + 678, + 289, + 719 + ], + "type": "text", + "content": " is a small constant added for numerical stability. The final reward is modulated using an exponential decay function:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 729, + 290, + 775 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 729, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 70, + 729, + 290, + 775 + ], + "type": "interline_equation", + "content": "R _ {\\text {a c c u r a c y}} (o | q) = \\left\\{ \\begin{array}{l l} \\exp (- \\alpha z), & \\text {i f o i s c o r r e c t ,} \\\\ 0, & \\text {i f o i s i n c o r r e c t .} \\end{array} \\right. \\tag {4}", + "image_path": "16c7a276b19b5bd18129c5cf39ce6ea82c941e083f61efcbc174c878634d8a02.jpg" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 524, + 97 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 524, + 97 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 524, + 97 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 71, + 524, + 97 + ], + "type": "inline_equation", + "content": "\\alpha > 0" + }, + { + "bbox": [ + 302, + 71, + 524, + 97 + ], + "type": "text", + "content": " is a tunable hyperparameter controlling the strength of length penalization." 
+ } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 99, + 525, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 99, + 525, + 193 + ], + "spans": [ + { + "bbox": [ + 302, + 99, + 525, + 193 + ], + "type": "text", + "content": "This formulation ensures that overly long correct responses are systematically penalized, while relatively concise ones are amplified. Unlike static or absolute length constraints, our approach leverages standardized deviation, allowing for dynamic adaptation to the distributional properties of each question." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 205, + 522, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 205, + 522, + 232 + ], + "spans": [ + { + "bbox": [ + 302, + 205, + 522, + 232 + ], + "type": "text", + "content": "3.2 Explicit Penalty for Incorrect Answers to Enhance True Accuracy" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 301, + 236, + 525, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 236, + 525, + 411 + ], + "spans": [ + { + "bbox": [ + 301, + 236, + 525, + 411 + ], + "type": "text", + "content": "Existing methods often prioritize maximizing " + }, + { + "bbox": [ + 301, + 236, + 525, + 411 + ], + "type": "inline_equation", + "content": "\\text{pass} @ 1" + }, + { + "bbox": [ + 301, + 236, + 525, + 411 + ], + "type": "text", + "content": " — the success rate on the first attempt—typically within restricted response lengths. However, this focus can inadvertently degrade overall model accuracy. The fundamental issue appears to stem from the use of a binary accuracy reward, rather than length-based regularization: under pressure to generate responses within a limited length, a model is encouraged to provide an answer, even if it's a guess, rather than no answer at all. Such guesses can achieve a non-zero reward and inflate " + }, + { + "bbox": [ + 301, + 236, + 525, + 411 + ], + "type": "inline_equation", + "content": "\\text{pass} @ 1" + }, + { + "bbox": [ + 301, + 236, + 525, + 411 + ], + "type": "text", + "content": ", but they do so at the cost of overall precision by rewarding less rigorous reasoning." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 412, + 525, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 412, + 525, + 479 + ], + "spans": [ + { + "bbox": [ + 302, + 412, + 525, + 479 + ], + "type": "text", + "content": "To counteract this tendency and foster a more robust distinction between correct and incorrect outputs, we introduce a revised reward structure that explicitly penalizes incorrect responses. This new reward function is defined as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 303, + 486, + 525, + 533 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 486, + 525, + 533 + ], + "spans": [ + { + "bbox": [ + 303, + 486, + 525, + 533 + ], + "type": "interline_equation", + "content": "R _ {\\text {a c c u r a c y}} (o \\mid q) = \\left\\{ \\begin{array}{l l} \\exp (- \\alpha z), & \\text {i f o i s c o r r e c t ,} \\\\ - 1, & \\text {i f o i s i n c o r r e c t ,} \\end{array} \\right. 
\\tag {5}", + "image_path": "3fa5200369db7547f78269a6c2c7425fa2544a50a17f62152b292e4193de8175.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "spans": [ + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "inline_equation", + "content": "o" + }, + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "text", + "content": " is the output, " + }, + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "text", + "content": " is the question, " + }, + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "inline_equation", + "content": "z" + }, + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "text", + "content": " represents the standardized length deviation of a correct response, and " + }, + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "inline_equation", + "content": "\\alpha > 0" + }, + { + "bbox": [ + 302, + 534, + 524, + 600 + ], + "type": "text", + "content": " is a hyperparameter controlling the strength of the length penalization for correct answers, consistent with prior definitions." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 601, + 524, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 601, + 524, + 640 + ], + "spans": [ + { + "bbox": [ + 302, + 601, + 524, + 640 + ], + "type": "text", + "content": "The expected reward for a response, given its probability of correctness " + }, + { + "bbox": [ + 302, + 601, + 524, + 640 + ], + "type": "inline_equation", + "content": "P(\\text{correct})" + }, + { + "bbox": [ + 302, + 601, + 524, + 640 + ], + "type": "text", + "content": ", under this formulation is:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 315, + 651, + 524, + 683 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 651, + 524, + 683 + ], + "spans": [ + { + "bbox": [ + 315, + 651, + 524, + 683 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathbb {E} \\left[ R _ {\\text {a c c u r a c y}} (o \\mid q) \\right] = P (\\text {c o r r e c t}) \\cdot \\exp (- \\alpha z) \\\\ - (1 - P (\\text {c o r r e c t})) \\tag {4} \\\\ \\end{array}", + "image_path": "274094583e67753086af142c74b384929a0aa9ee769873e28522d71ca15deeb9.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "text", + "content": "To intuitively grasp the impact of this reward function, let us consider a simplified scenario where the length penalty for correct answers is negligible (i.e., " + }, + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\exp (-\\alpha z)\\approx 1" + }, + { + "bbox": [ + 302, + 693, + 525, + 775 + ], + "type": "text", + "content": "). In practice, the average reward for correct answers often normalizes close to this value. 
Under this assumption, the expected reward" + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 128, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 128, + 84 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 128, + 84 + ], + "type": "text", + "content": "simplifies to:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 123, + 90, + 290, + 105 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 123, + 90, + 290, + 105 + ], + "spans": [ + { + "bbox": [ + 123, + 90, + 290, + 105 + ], + "type": "interline_equation", + "content": "\\mathbb {E} [ R ] \\approx 2 P (\\text {c o r r e c t}) - 1 \\tag {5}", + "image_path": "35c0b48f224a10f3ce125cb8d89a0d94f7d88abe9247cffe859c99119ae86f8b.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 111, + 291, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 111, + 291, + 246 + ], + "spans": [ + { + "bbox": [ + 67, + 111, + 291, + 246 + ], + "type": "text", + "content": "This approximation reveals a crucial characteristic: the expected reward becomes positive only when " + }, + { + "bbox": [ + 67, + 111, + 291, + 246 + ], + "type": "inline_equation", + "content": "P(\\mathrm{correct}) > 0.5" + }, + { + "bbox": [ + 67, + 111, + 291, + 246 + ], + "type": "text", + "content": ". This threshold acts as a principled deterrent against speculative guessing, compelling the model to internalize a more stringent decision boundary for correctness. Our empirical results confirm that this approach significantly improves both " + }, + { + "bbox": [ + 67, + 111, + 291, + 246 + ], + "type": "inline_equation", + "content": "pass@1" + }, + { + "bbox": [ + 67, + 111, + 291, + 246 + ], + "type": "text", + "content": " and overall precision, encouraging the model to favor accuracy over mere completion." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 254, + 223, + 281 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 254, + 223, + 281 + ], + "spans": [ + { + "bbox": [ + 67, + 254, + 223, + 281 + ], + "type": "text", + "content": "3.3 Advantage Reweighting for Difficulty-Aware Training" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 285, + 291, + 433 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 285, + 291, + 433 + ], + "spans": [ + { + "bbox": [ + 67, + 285, + 291, + 433 + ], + "type": "text", + "content": "While length reward and advantage reweighting can enhance precision and mitigate morbidity, uniformly applying rewards across all questions, irrespective of their intrinsic difficulty, may implicitly bias the model. It might learn to excessively optimize performance on simpler tasks—where correct and concise responses are more readily achieved—while neglecting more complex questions that demand deeper reasoning. Consequently, the performance on challenging problems can degrade." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 434, + 291, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 434, + 291, + 514 + ], + "spans": [ + { + "bbox": [ + 67, + 434, + 291, + 514 + ], + "type": "text", + "content": "Therefore, we introduce a difficulty-aware advantage reweighting strategy, which dynamically adjusts the magnitude of policy updates based on an estimate of problem difficulty. 
The intuition is to amplify learning signals for harder tasks, re-anchoring the model towards harder tasks." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 515, + 291, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 515, + 291, + 569 + ], + "spans": [ + { + "bbox": [ + 67, + 515, + 291, + 569 + ], + "type": "text", + "content": "Formally, we first quantify problem difficulty. For a given question " + }, + { + "bbox": [ + 67, + 515, + 291, + 569 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 67, + 515, + 291, + 569 + ], + "type": "text", + "content": " and its associated set of sampled responses " + }, + { + "bbox": [ + 67, + 515, + 291, + 569 + ], + "type": "inline_equation", + "content": "\\{o_i\\}" + }, + { + "bbox": [ + 67, + 515, + 291, + 569 + ], + "type": "text", + "content": ", we define the group's empirical correctness ratio as:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 82, + 573, + 290, + 602 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 573, + 290, + 602 + ], + "spans": [ + { + "bbox": [ + 82, + 573, + 290, + 602 + ], + "type": "interline_equation", + "content": "\\rho_ {q} = \\frac {\\text {n u m b e r o f c o r r e c t r e s p o n s e s f o r} q}{\\text {t o t a l n u m b e r o f r e s p o n s e s f o r} q}. \\tag {6}", + "image_path": "1f9e243b080e8803874abb0d7a3116c00508f277e09d4ff8f36c76efb04e4978.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 605, + 290, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 605, + 290, + 644 + ], + "spans": [ + { + "bbox": [ + 67, + 605, + 290, + 644 + ], + "type": "text", + "content": "This ratio, " + }, + { + "bbox": [ + 67, + 605, + 290, + 644 + ], + "type": "inline_equation", + "content": "\\rho_{q}" + }, + { + "bbox": [ + 67, + 605, + 290, + 644 + ], + "type": "text", + "content": ", serves as an inverse proxy for problem difficulty: a lower " + }, + { + "bbox": [ + 67, + 605, + 290, + 644 + ], + "type": "inline_equation", + "content": "\\rho_{q}" + }, + { + "bbox": [ + 67, + 605, + 290, + 644 + ], + "type": "text", + "content": " suggests a harder question." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 646, + 290, + 698 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 646, + 290, + 698 + ], + "spans": [ + { + "bbox": [ + 67, + 646, + 290, + 698 + ], + "type": "text", + "content": "Next, we introduce a logistic reweighting factor dependent on this ratio to modulate the advantage estimates during the RL training step. 
The logistic function is defined as:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 96, + 702, + 290, + 731 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 702, + 290, + 731 + ], + "spans": [ + { + "bbox": [ + 96, + 702, + 290, + 731 + ], + "type": "interline_equation", + "content": "w \\left(\\rho_ {q}\\right) = A + \\frac {B - A}{1 + \\exp \\left[ k \\left(\\rho_ {q} - \\rho_ {0}\\right) \\right]}, \\tag {7}", + "image_path": "ecf1585c4fefdee7569f5058092eb3664780d7e731db04902c9053171784375e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "type": "text", + "content": "where hyperparameters " + }, + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "type": "inline_equation", + "content": "A, B, \\rho_0, k" + }, + { + "bbox": [ + 67, + 735, + 290, + 775 + ], + "type": "text", + "content": " allow precise control over the sensitivity of weighting to problem difficulty." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 71, + 525, + 112 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 525, + 112 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 525, + 112 + ], + "type": "text", + "content": "To apply this reweighting, we first consider the normalized advantage estimate for a response " + }, + { + "bbox": [ + 302, + 71, + 525, + 112 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 302, + 71, + 525, + 112 + ], + "type": "text", + "content": " to question " + }, + { + "bbox": [ + 302, + 71, + 525, + 112 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 302, + 71, + 525, + 112 + ], + "type": "text", + "content": ":" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 367, + 122, + 525, + 152 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 367, + 122, + 525, + 152 + ], + "spans": [ + { + "bbox": [ + 367, + 122, + 525, + 152 + ], + "type": "interline_equation", + "content": "\\tilde {A} _ {i} = \\frac {R \\left(o _ {i} | q\\right) - \\mu_ {q}}{\\sigma_ {q} + \\epsilon}, \\tag {8}", + "image_path": "7b4de12b34c63c91c7c21a3c5c02c39504eef03e7f0ba951fbb09f305c5ccb42.jpg" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "spans": [ + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "inline_equation", + "content": "\\mu_q" + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "inline_equation", + "content": "\\sigma_q" + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "text", + "content": " are the mean and standard deviation of rewards " + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "inline_equation", + "content": "R(o_i|q)" + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "text", + "content": " for responses to question " + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "inline_equation", + "content": "q" + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": 
"inline_equation", + "content": "\\epsilon" + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "text", + "content": " is a small constant for numerical stability. We then define the difficulty-aware advantage, " + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "inline_equation", + "content": "A_i'" + }, + { + "bbox": [ + 302, + 162, + 526, + 228 + ], + "type": "text", + "content": ", as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 333, + 238, + 525, + 274 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 238, + 525, + 274 + ], + "spans": [ + { + "bbox": [ + 333, + 238, + 525, + 274 + ], + "type": "interline_equation", + "content": "A _ {i} ^ {\\prime} = \\tilde {A} _ {i} \\cdot \\left\\{ \\begin{array}{l l} w \\left(\\rho_ {q}\\right), & \\text {i f} \\tilde {A} _ {i} > 0 \\\\ w \\left(1 - \\rho_ {q}\\right), & \\text {i f} \\tilde {A} _ {i} \\leq 0 \\end{array} \\right. \\tag {9}", + "image_path": "1fb4da8497ab06226005c02b6f0912d47c012f78d52a6a63a29cd98eae58f115.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 302, + 284, + 525, + 393 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 284, + 525, + 393 + ], + "spans": [ + { + "bbox": [ + 302, + 284, + 525, + 393 + ], + "type": "text", + "content": "This formulation ensures that for difficult problems (low " + }, + { + "bbox": [ + 302, + 284, + 525, + 393 + ], + "type": "inline_equation", + "content": "\\rho_{q}" + }, + { + "bbox": [ + 302, + 284, + 525, + 393 + ], + "type": "text", + "content": "), correct responses (which are rare and thus highly valuable) receive substantially larger updates due to the increased weighting " + }, + { + "bbox": [ + 302, + 284, + 525, + 393 + ], + "type": "inline_equation", + "content": "w(\\rho_q)" + }, + { + "bbox": [ + 302, + 284, + 525, + 393 + ], + "type": "text", + "content": ". Conversely, incorrect responses on easier problems (high " + }, + { + "bbox": [ + 302, + 284, + 525, + 393 + ], + "type": "inline_equation", + "content": "\\rho_{q}" + }, + { + "bbox": [ + 302, + 284, + 525, + 393 + ], + "type": "text", + "content": ") are penalized more strongly, sharpening the decision boundary for problems where high performance should be expected." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 403, + 524, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 403, + 524, + 431 + ], + "spans": [ + { + "bbox": [ + 302, + 403, + 524, + 431 + ], + "type": "text", + "content": "3.4 Impact of Data Quality on Reinforcement Learning Effectiveness" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 302, + 436, + 525, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 436, + 525, + 624 + ], + "spans": [ + { + "bbox": [ + 302, + 436, + 525, + 624 + ], + "type": "text", + "content": "To further enhance model capabilities, we first performed supervised fine-tuning (SFT) on a specialized dataset of 13k math reasoning problems sourced from DeepScaler (Luo et al., 2025b) (including historical AMC, AIME, and OmniMath problems) with solutions generated by QwQ32B (Team, 2025). Although this SFT model initially showed signs of overfitting, subsequent application of our proposed RL strategies rapidly mitigated these issues. This SFT+RL approach yielded faster convergence and significantly improved pass@1 accuracy and overall precision compared to applying RL directly to the original base model." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 302, + 626, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 626, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 626, + 526, + 775 + ], + "type": "text", + "content": "Our findings also highlight the critical role of data quality and curriculum strategies in RL. We established a robust initial policy by applying RL to a subset of challenging problems from the DeepScaler dataset. This policy was then further refined using a curriculum composed of the most challenging problems identified from this first RL stage and supplemented by high-difficulty examples from the Light-R1 dataset (Wen et al., 2025). This two-stage curriculum markedly enhanced the model's ability to continuously improve on complex tasks." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 289, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 289, + 205 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 289, + 205 + ], + "type": "text", + "content": "Finally, we addressed a persistent formatting issue of repetitive n-gram patterns, likely stemming from an absence of clear end-of-sequence (EOS) signals during SFT. By temporarily removing length-dependent rewards and introducing an explicit negative reward " + }, + { + "bbox": [ + 69, + 71, + 289, + 205 + ], + "type": "inline_equation", + "content": "(-1.5)" + }, + { + "bbox": [ + 69, + 71, + 289, + 205 + ], + "type": "text", + "content": " for such repeated ngrams, we achieved further improvements in precision and pass@1 metrics. This intervention demonstrates the effectiveness of targeted reward modifications for mitigating specific output anomalies." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 206, + 289, + 314 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 206, + 289, + 314 + ], + "spans": [ + { + "bbox": [ + 69, + 206, + 289, + 314 + ], + "type": "text", + "content": "In summary, our experiments affirm that initial model capacity, curated data curricula for RL, and targeted reward engineering are pivotal for optimizing fine-tuning outcomes. These elements collectively inform a systematic approach for enhancing language models' ability to produce concise, accurate, and well-structured responses across tasks of varying complexity." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 324, + 190, + 338 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 324, + 190, + 338 + ], + "spans": [ + { + "bbox": [ + 69, + 324, + 190, + 338 + ], + "type": "text", + "content": "4 Experimental Setup" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 344, + 289, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 344, + 289, + 451 + ], + "spans": [ + { + "bbox": [ + 69, + 344, + 289, + 451 + ], + "type": "text", + "content": "We evaluate GRPO-LEAD, integrating length-dependent accuracy rewards, explicit penalties for incorrect solutions, and difficulty-aware advantage reweighting, on DEEPSEEK-R1 DISTILLED variants (Guo et al., 2025; Yang et al., 2024). Our experiments cover two model scales, 7B and 14B parameters. All GRPO training is conducted using the VERL framework.(Sheng et al., 2024)." 
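For the repetitive n-gram issue noted in Section 3.4 (and revisited during Stage-2 training in Section 4.3), a simple n-gram counter is enough to trigger the -1.5 reward. The sketch below shows one way such a check could be wired in; the paper does not specify the n-gram size or repeat threshold, so the values used here are purely illustrative assumptions.

```python
from collections import Counter
from typing import List

def has_repeated_ngram(tokens: List[str], n: int = 20, max_repeats: int = 1) -> bool:
    """Return True if any n-gram occurs more than `max_repeats` times.
    Both `n` and `max_repeats` are illustrative choices, not the paper's values."""
    if len(tokens) < n:
        return False
    counts = Counter(tuple(tokens[i:i + n]) for i in range(len(tokens) - n + 1))
    return any(c > max_repeats for c in counts.values())

def repetition_adjusted_reward(base_reward: float, tokens: List[str],
                               penalty: float = -1.5) -> float:
    """Override the reward with the repetition penalty when degenerate looping is detected."""
    return penalty if has_repeated_ngram(tokens) else base_reward

looping_tail = ["thus", "x", "=", "2", ","] * 30    # a degenerate, looping generation
print(repetition_adjusted_reward(1.0, looping_tail))  # -> -1.5
```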
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 461, + 198, + 474 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 461, + 198, + 474 + ], + "spans": [ + { + "bbox": [ + 69, + 461, + 198, + 474 + ], + "type": "text", + "content": "4.1 Datasets and Filtering" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 479, + 289, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 479, + 289, + 545 + ], + "spans": [ + { + "bbox": [ + 69, + 479, + 289, + 545 + ], + "type": "text", + "content": "Our primary training data is sourced from the DEEPSCALER dataset (Luo et al., 2025b). We filter out problems with difficulty ratings below 2.5, resulting in approximately 9,000 questions for fine-tuning." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 546, + 289, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 546, + 289, + 653 + ], + "spans": [ + { + "bbox": [ + 69, + 546, + 289, + 653 + ], + "type": "text", + "content": "For stage 2 of our 14B model experiments, we further refine the dataset by selecting problems where the model's stage-1 rollout accuracy is no greater than " + }, + { + "bbox": [ + 69, + 546, + 289, + 653 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 69, + 546, + 289, + 653 + ], + "type": "text", + "content": ", yielding around 2,283 questions. Additionally, we incorporate challenging problems with numeric answers from the stage-2 dataset of Light-R1 (Wen et al., 2025), resulting in 3,524 questions in total." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 663, + 178, + 676 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 663, + 178, + 676 + ], + "spans": [ + { + "bbox": [ + 69, + 663, + 178, + 676 + ], + "type": "text", + "content": "4.2 Hyperparameters" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 679, + 289, + 773 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 679, + 289, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 679, + 289, + 773 + ], + "type": "text", + "content": "We train with a learning rate of " + }, + { + "bbox": [ + 69, + 679, + 289, + 773 + ], + "type": "inline_equation", + "content": "1 \\times 10^{-6}" + }, + { + "bbox": [ + 69, + 679, + 289, + 773 + ], + "type": "text", + "content": ", batch size 32, and group size 8—generating 8 rollouts per question for GRPO reward computation. The KL penalty term is removed, as it was found to suppress exploration in our experiments, which is also suggested in similar works (Liu et al., 2025b; Hu et al., 2025b)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "spans": [ + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "text", + "content": "For the length-dependent accuracy reward, we set " + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "inline_equation", + "content": "\\alpha = 0.05" + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "text", + "content": ", providing a moderate decay that encourages conciseness without penalizing slight morbidity. 
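For reference, the training settings reported in Sections 4.1 and 4.2 can be collected into a small configuration object. The dataclass below is only an illustrative container for the stated values; it is not part of the VERL framework's API.

```python
from dataclasses import dataclass

@dataclass
class GRPOLeadConfig:
    learning_rate: float = 1e-6   # reported learning rate
    batch_size: int = 32          # questions per batch
    group_size: int = 8           # rollouts sampled per question for reward computation
    kl_coef: float = 0.0          # KL penalty removed; it suppressed exploration
    alpha: float = 0.05           # length-penalty strength (Section 4.2)
    min_difficulty: float = 2.5   # stage-1 DeepScaleR filter, leaving roughly 9k questions

print(GRPOLeadConfig())
```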
For difficulty-aware advantage reweighting, we use " + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "inline_equation", + "content": "A = 0.4" + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "inline_equation", + "content": "B = 1.5" + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "inline_equation", + "content": "\\rho_0 = 0.75" + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "inline_equation", + "content": "k = 10" + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "text", + "content": ". This configuration ensures reweighting is minimal on easy problems but increases sharply near the " + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "inline_equation", + "content": "75\\%" + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "text", + "content": " correctness threshold. The steep slope (" + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "inline_equation", + "content": "k = 10" + }, + { + "bbox": [ + 305, + 71, + 524, + 219 + ], + "type": "text", + "content": ") enables strong emphasis on high-difficulty examples, guiding the model to allocate learning more effectively." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 305, + 231, + 515, + 244 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 231, + 515, + 244 + ], + "spans": [ + { + "bbox": [ + 305, + 231, + 515, + 244 + ], + "type": "text", + "content": "4.3 Model Variants and Fine-Tuning Stages" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 305, + 249, + 524, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 249, + 524, + 439 + ], + "spans": [ + { + "bbox": [ + 305, + 249, + 524, + 439 + ], + "type": "text", + "content": "7B Model Experiments Starting from the DeepSeek-R1 Distilled 7B Qwen-Math checkpoint, we first apply standard GRPO on the 9k questions, producing a baseline. Then, we train three more models from the DeepSeek-R1 Distilled 7B QwenMath checkpoint, adding one more of the following components subsequently: (i) Length Reward only, (ii) Length Reward + Advantage Reweighting, (iii) Length Reward + Advantage Reweighting + Explicit Penalty. We train for approximately 200 steps and select the top-performing checkpoints based on validation results. At test time, we limit the generation length to 8k for all 7B models, matching the training length limit." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 305, + 449, + 524, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 449, + 524, + 609 + ], + "spans": [ + { + "bbox": [ + 305, + 449, + 524, + 609 + ], + "type": "text", + "content": "14B Model Experiments We extend the above procedure to the DeepSeek-R1 Distilled 14B Qwen checkpoint across multiple stages. In Stage 1, we train for 100 steps using all GRPO-LEAD components on the filtered 9k-question dataset. To enhance the model's base capability, we first fine-tune the model on a curated set of 13k math problems with supervised fine-tuning (SFT), then conduct the RL phase. This SFT stage significantly improves the model's reasoning quality, even though it tends to increase the output length and caused some format errors." 
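A quick numerical check makes the claimed shape of the weighting curve concrete. The snippet below simply evaluates Eq. (7) at a few correctness ratios with the values given in Section 4.2; it is arithmetic only, not training code.

```python
import math

A, B, rho0, k = 0.4, 1.5, 0.75, 10.0
w = lambda rho: A + (B - A) / (1.0 + math.exp(k * (rho - rho0)))

for rho in (0.25, 0.50, 0.75, 0.90):
    print(f"w({rho:.2f}) = {w(rho):.2f}")
# w(0.25) = 1.49, w(0.50) = 1.42, w(0.75) = 0.95, w(0.90) = 0.60:
# hard questions receive roughly 1.4-1.5x updates, easy ones about 0.6x,
# with the transition centered at rho0 = 0.75 as described above.
```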
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 305, + 612, + 524, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 612, + 524, + 679 + ], + "spans": [ + { + "bbox": [ + 305, + 612, + 524, + 679 + ], + "type": "text", + "content": "The SFT data consists of all problems in the DEEPSCALER dataset with difficulty greater than 1. To construct high-quality reasoning traces for SFT, we use the QWQ-32B model (Team, 2025) to generate step-by-step solutions." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 305, + 681, + 524, + 773 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 305, + 681, + 524, + 773 + ], + "spans": [ + { + "bbox": [ + 305, + 681, + 524, + 773 + ], + "type": "text", + "content": "After observing that some questions remain low correctness, we further fine-tune for Stage 2 to focus on those underperformed problems. We also address the repetitive output patterns by removing the length penalty and introducing a negative reward " + }, + { + "bbox": [ + 305, + 681, + 524, + 773 + ], + "type": "inline_equation", + "content": "(-1.5)" + }, + { + "bbox": [ + 305, + 681, + 524, + 773 + ], + "type": "text", + "content": " for repeated " + }, + { + "bbox": [ + 305, + 681, + 524, + 773 + ], + "type": "inline_equation", + "content": "n" + }, + { + "bbox": [ + 305, + 681, + 524, + 773 + ], + "type": "text", + "content": "-grams. We continue training for 240 more steps (100 steps with initial settings" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 153 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 153 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 153 + ], + "type": "text", + "content": "and 140 more steps with repetition penalty), yielding the final model checkpoint. At test time, we limit the generation length to 14k for all 14B models, in accordance with our training settings and also to better compare the models' performance in a low-budget scenario." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 161, + 255, + 174 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 161, + 255, + 174 + ], + "spans": [ + { + "bbox": [ + 67, + 161, + 255, + 174 + ], + "type": "text", + "content": "4.4 Baselines and Evaluation Protocol" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 179, + 290, + 299 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 179, + 290, + 299 + ], + "spans": [ + { + "bbox": [ + 67, + 179, + 290, + 299 + ], + "type": "text", + "content": "We compare our models with both DEEPSEEK-R1 distilled-14B-Qwen (Guo et al., 2025) (the distilled Qwen model without GRPO-LEAD) and LIGHT-R1-14B-DS (Wen et al., 2025), which has the same base model as ours and was first finetuned with 3k hard math problems with SFT, and then fine-tuned with a cosine-based length reward (Yeo et al., 2025) on their selected math problems for three epochs using GRPO." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 301, + 291, + 409 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 301, + 291, + 409 + ], + "spans": [ + { + "bbox": [ + 67, + 301, + 291, + 409 + ], + "type": "text", + "content": "We primarily report three metrics: (1) Cons@32, accuracy through majority voting for 32 samplings; (2) Pass@1, the probability that the top-1 sample is correct under a chosen decoding strategy; (3) Average Length " + }, + { + "bbox": [ + 67, + 301, + 291, + 409 + ], + "type": "inline_equation", + "content": "(\\mathrm{Len}_{\\mathrm{avg}})" + }, + { + "bbox": [ + 67, + 301, + 291, + 409 + ], + "type": "text", + "content": ", measuring morbidity. Unless otherwise specified, we decode with temperature 0.6 and sample 32 solutions per question, then compute Cons@32 and Pass@1 over these samples." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 418, + 127, + 431 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 418, + 127, + 431 + ], + "spans": [ + { + "bbox": [ + 67, + 418, + 127, + 431 + ], + "type": "text", + "content": "5 Results" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 440, + 291, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 440, + 291, + 575 + ], + "spans": [ + { + "bbox": [ + 67, + 440, + 291, + 575 + ], + "type": "text", + "content": "In this section, we present a comprehensive evaluation of the proposed GRPO-LEAD framework on two mathematical benchmarks: AIME24 and AIME25. Our analysis is structured as follows: we first examine training dynamics to illustrate how GRPO-LEAD accelerates convergence; next, we perform an ablation study to assess the incremental benefits of each component; and finally, we compare against state-of-the-art baselines for 14B-scale language models." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 584, + 185, + 597 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 584, + 185, + 597 + ], + "spans": [ + { + "bbox": [ + 67, + 584, + 185, + 597 + ], + "type": "text", + "content": "5.1 Training Dynamics" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 602, + 291, + 737 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 602, + 291, + 737 + ], + "spans": [ + { + "bbox": [ + 67, + 602, + 291, + 737 + ], + "type": "text", + "content": "Figure 2 plots the evolution of Pass@1 on a validation split over training steps for three configurations of the 7B model: (i) baseline GRPO, (ii) GRPO with length reward, and (iii) GRPO with both length reward and advantage reweighting. We observe two clear trends. First, adding a length-dependent reward not only yields higher Pass@1 but also accelerates early-stage convergence, suggesting that penalizing overly verbose correct solutions provides a more informative learning signal." 
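The two accuracy metrics of Section 4.4 can be sketched as follows, assuming each question comes with 32 sampled final answers and a single reference answer; the exact-match comparison here stands in for the paper's actual answer checker, which is an assumption on our part.

```python
from collections import Counter
from typing import List

def cons_at_k(answers: List[str], reference: str) -> float:
    """Cons@k: 1.0 if the majority-voted answer over the k samples matches the reference."""
    majority, _ = Counter(answers).most_common(1)[0]
    return float(majority == reference)

def pass_at_1(answers: List[str], reference: str) -> float:
    """Pass@1 estimated as the fraction of sampled answers that are correct,
    i.e. the expected success rate of a single draw under the same decoding settings."""
    return sum(a == reference for a in answers) / len(answers)

samples = ["42"] * 20 + ["41"] * 12                        # 32 samples for one question
print(cons_at_k(samples, "42"), pass_at_1(samples, "42"))  # 1.0 0.625
```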
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 307, + 71, + 523, + 186 + ], + "blocks": [ + { + "bbox": [ + 307, + 71, + 523, + 186 + ], + "lines": [ + { + "bbox": [ + 307, + 71, + 523, + 186 + ], + "spans": [ + { + "bbox": [ + 307, + 71, + 523, + 186 + ], + "type": "image", + "image_path": "59333a80607e2d683bc530ea9c4bd43634e70dd8669f65a5c324f3f4a2e39e18.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 197, + 526, + 258 + ], + "lines": [ + { + "bbox": [ + 302, + 197, + 526, + 258 + ], + "spans": [ + { + "bbox": [ + 302, + 197, + 526, + 258 + ], + "type": "text", + "content": "Figure 2: Validation* Pass@1 over training steps for three configurations: GRPO, GRPO+L, and GRPO+LAD. As shown by the faster convergence, length reward and advantage reweighting provide a richer reward signal signal than the original setup." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 280, + 525, + 360 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 280, + 525, + 360 + ], + "spans": [ + { + "bbox": [ + 302, + 280, + 525, + 360 + ], + "type": "text", + "content": "Second, incorporating advantage reweighting (to amplify updates on harder questions) further steepens the trajectory, indicating that reweighting advantage estimates according to problem difficulty helps the model refine reasoning on challenging prompts more efficiently." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 361, + 525, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 361, + 525, + 442 + ], + "spans": [ + { + "bbox": [ + 302, + 361, + 525, + 442 + ], + "type": "text", + "content": "Overall, these dynamics confirm that GRPO-LEAD components—particularly the length reward—bolster training stability and speed. By comparison, the baseline GRPO model learns more slowly and lags behind in Pass@1 across the entire training horizon." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 452, + 414, + 465 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 452, + 414, + 465 + ], + "spans": [ + { + "bbox": [ + 302, + 452, + 414, + 465 + ], + "type": "text", + "content": "5.2 Ablation Analysis" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 469, + 525, + 522 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 469, + 525, + 522 + ], + "spans": [ + { + "bbox": [ + 302, + 469, + 525, + 522 + ], + "type": "text", + "content": "We next quantify the contribution of each GRPO-LEAD component through a step-by-step ablation on the 7B model. Table 1 summarizes results on AIME24 and AIME25." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "text", + "content": "Length Reward Brings Conciseness to Reasoning We first incorporate the length-dependent accuracy reward into GRPO. 
Compared to Deepseek-7B, length reward slightly improves Pass@1 on both AIME24 by " + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "inline_equation", + "content": "1.6\\%" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "inline_equation", + "content": "0.431 \\rightarrow 0.438" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "text", + "content": ") and AIME25 by " + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "inline_equation", + "content": "5.4\\%" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "inline_equation", + "content": "0.292 \\rightarrow 0.308" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "text", + "content": "), with an additional improvement of Cons@32 by " + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "inline_equation", + "content": "14.1\\%" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "text", + "content": " on AIME25. Notably, these improvements are accompanied by a substantial reduction of 1,715 tokens (" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "inline_equation", + "content": "24.5\\%" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "text", + "content": ") and 1,903 tokens (" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "inline_equation", + "content": "26.8\\%" + }, + { + "bbox": [ + 302, + 531, + 526, + 774 + ], + "type": "text", + "content": ") in the average response length on the two datasets, respectively. Figure 3 further demonstrates that length reward largely enhances performance in low-budget settings over the base model, matching its peak performance with only 5/8 of the token budget on the more difficult AIME25. These results demonstrate that length reward, by penalizing correct but overly verbose solutions, can effectively reduce unnec" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 743, + 291, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 743, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 743, + 291, + 774 + ], + "type": "text", + "content": "*The validation consists of 27 challenging problems from AIMO2 (Frieder et al., 2024), CMU-MATH-AIMO (Sun, 2024), and AIME24." + } + ] + } + ], + "index": 15 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 113, + 68, + 478, + 155 + ], + "blocks": [ + { + "bbox": [ + 113, + 68, + 478, + 155 + ], + "lines": [ + { + "bbox": [ + 113, + 68, + 478, + 155 + ], + "spans": [ + { + "bbox": [ + 113, + 68, + 478, + 155 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">Ablation Setting</td><td colspan="3">AIME24</td><td colspan="3">AIME25</td></tr>
<tr><td>Cons@32</td><td>Pass@1</td><td>Len<sub>avg</sub></td><td>Cons@32</td><td>Pass@1</td><td>Len<sub>avg</sub></td></tr>
<tr><td>Deepseek-7B</td><td>0.767</td><td>0.431</td><td>6,990</td><td>0.467</td><td>0.292</td><td>7,113</td></tr>
<tr><td>GRPO + len. reward</td><td>0.767</td><td>0.438</td><td>5,275</td><td>0.533</td><td>0.308</td><td>5,210</td></tr>
<tr><td>+ adv. reweighting</td><td>0.767</td><td>0.458</td><td>5,323</td><td>0.567</td><td>0.325</td><td>5,437</td></tr>
<tr><td>+ explicit penalty</td><td>0.800</td><td>0.470</td><td>6,104</td><td>0.567</td><td>0.345</td><td>6,308</td></tr>
</table>
", + "image_path": "9230674a58fba87161ad7c0744bfa24540aa98dea15fe70898e188356acfe870.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 164, + 523, + 199 + ], + "lines": [ + { + "bbox": [ + 67, + 164, + 523, + 199 + ], + "spans": [ + { + "bbox": [ + 67, + 164, + 523, + 199 + ], + "type": "text", + "content": "Table 1: Ablation results on AIME24 and AIME25. We report Cons@32 (accuracy through majority voting for 32 samplings), Pass@1, and the average token length (Lenavg). The best value in each column is in boldface, the second best is underlined." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 71, + 214, + 273, + 415 + ], + "blocks": [ + { + "bbox": [ + 71, + 214, + 273, + 415 + ], + "lines": [ + { + "bbox": [ + 71, + 214, + 273, + 415 + ], + "spans": [ + { + "bbox": [ + 71, + 214, + 273, + 415 + ], + "type": "image", + "image_path": "f58a619ee4c24ad833dcde93315c5269472b57b7d1effc226bf1d6b1b8ec1c8e.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 149, + 423, + 195, + 433 + ], + "lines": [ + { + "bbox": [ + 149, + 423, + 195, + 433 + ], + "spans": [ + { + "bbox": [ + 149, + 423, + 195, + 433 + ], + "type": "text", + "content": "(a) AIME24" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 320, + 215, + 521, + 415 + ], + "blocks": [ + { + "bbox": [ + 320, + 215, + 521, + 415 + ], + "lines": [ + { + "bbox": [ + 320, + 215, + 521, + 415 + ], + "spans": [ + { + "bbox": [ + 320, + 215, + 521, + 415 + ], + "type": "image", + "image_path": "f6114c3dc99b278ea5e4df33567114e0f0169d5067babb21babc251663674c47.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 398, + 422, + 444, + 433 + ], + "lines": [ + { + "bbox": [ + 398, + 422, + 444, + 433 + ], + "spans": [ + { + "bbox": [ + 398, + 422, + 444, + 433 + ], + "type": "text", + "content": "(b) AIME25" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 67, + 442, + 523, + 478 + ], + "lines": [ + { + "bbox": [ + 67, + 442, + 523, + 478 + ], + "spans": [ + { + "bbox": [ + 67, + 442, + 523, + 478 + ], + "type": "text", + "content": "Figure 3: Performance against inference budget for training done with different ablations of LEAD. GRPO with length reward (GRPO+L) largely enhances the performance at low budget settings compared to before training (DeepseekR1-7B)." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 500, + 290, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 500, + 290, + 526 + ], + "spans": [ + { + "bbox": [ + 67, + 500, + 290, + 526 + ], + "type": "text", + "content": "essary text without compromising overall performance." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "text", + "content": "Advantage Reweighting Encourages Model to Solve More Difficult Problems Further incorporating difficulty-aware advantage reweighting (GRPO+LAD) refines performance. 
On AIME24, Pass@1 increases from the GRPO+L stage by " + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "inline_equation", + "content": "4.8\\%" + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "inline_equation", + "content": "(0.438 \\rightarrow 0.458)" + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "text", + "content": ", while Cons@32 remains 0.767. For AIME25, both Pass@1 and Cons@32 improve by " + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "inline_equation", + "content": "5.5\\%" + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "inline_equation", + "content": "(0.308 \\rightarrow 0.325)" + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "inline_equation", + "content": "6.4\\%" + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "inline_equation", + "content": "(0.533 \\rightarrow 0.567)" + }, + { + "bbox": [ + 67, + 544, + 290, + 774 + ], + "type": "text", + "content": ", respectively. As Figure 3 shows, GRPO+LAD demonstrates gains over GRPO+L in almost all budget regimes on AIME25 and for budgets exceeding 5k tokens on AIME24. These results indicate that advantage reweighting, by prioritizing challenging problems, strengthens reasoning robustness and mitigates over-reliance on simpler examples, thus validating its role in driving more reliable generalization." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "spans": [ + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "text", + "content": "Explicit Penalty for Incorrect Answers Regularizes Thinking Finally, introducing an explicit penalty for incorrect solutions (GRPO+LEAD) yields the highest Pass@1 scores. On AIME24, Pass@1 and Cons@32 improve from the GRPO+LAD stage by " + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "inline_equation", + "content": "2.6\\%" + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "inline_equation", + "content": "0.458 \\to 0.470" + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "text", + "content": ") and " + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "inline_equation", + "content": "4.3\\%" + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "inline_equation", + "content": "0.767 \\to 0.800" + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "text", + "content": "), respectively. On AIME25, Pass@1 also increases by " + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "inline_equation", + "content": "6.2\\%" + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "text", + "content": " (" + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "inline_equation", + "content": "0.325 \\to 0.345" + }, + { + "bbox": [ + 302, + 500, + 525, + 771 + ], + "type": "text", + "content": "), as detailed in Table 1. Notably, these gains involve a modest increase in average solution length on AIME24 (from approximately 5,300 to 6,104 tokens). Figure 3 illustrates this trade-off, showing a performance sacrifice in low-budget regimes, though GRPO+LEAD still outperforms GRPO+LAD with budgets higher than 5k tokens on AIME25. 
These results suggest that the explicit penalty serves as a regularizer for the model to be more conservative about its reasoning. Such regularization boosts performance while requiring a slightly longer thinking process, which" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 129, + 68, + 465, + 151 + ], + "blocks": [ + { + "bbox": [ + 129, + 68, + 465, + 151 + ], + "lines": [ + { + "bbox": [ + 129, + 68, + 465, + 151 + ], + "spans": [ + { + "bbox": [ + 129, + 68, + 465, + 151 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan="2">Model Name</td><td colspan="3">AIME24</td><td colspan="3">AIME25</td></tr>
<tr><td>Cons@32</td><td>Pass@1</td><td>Len<sub>avg</sub></td><td>Cons@32</td><td>Pass@1</td><td>Len<sub>avg</sub></td></tr>
<tr><td>DeepSeek-14B</td><td>0.800</td><td>0.614</td><td>9,182</td><td>0.633</td><td>0.429</td><td>10,046</td></tr>
<tr><td>Light-R1-14B-DS</td><td>0.833</td><td>0.641</td><td>9,571</td><td>0.767</td><td>0.505</td><td>10,194</td></tr>
<tr><td>LEAD-stage1</td><td>0.833</td><td>0.629</td><td>8,790</td><td>0.767</td><td>0.523</td><td>9,371</td></tr>
<tr><td>LEAD-stage2</td><td>0.867</td><td>0.650</td><td>8,267</td><td>0.767</td><td>0.539</td><td>8,668</td></tr>
</table>
", + "image_path": "cb27064bad9d7d7aad0c36e8295a8fde63547fc8b426989787b2f2ac1bb59271.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 158, + 524, + 185 + ], + "lines": [ + { + "bbox": [ + 67, + 158, + 524, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 158, + 524, + 185 + ], + "type": "text", + "content": "Table 2: Comparison of model performance on AIME24 and AIME25, showing Cons@32, Pass@1, and average token length (Lenavg). The best value in each column is in boldface, the second best is underlined." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 205, + 289, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 205, + 289, + 230 + ], + "spans": [ + { + "bbox": [ + 67, + 205, + 289, + 230 + ], + "type": "text", + "content": "nevertheless remains shorter than the Deepseek-7B baseline." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 241, + 291, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 241, + 291, + 323 + ], + "spans": [ + { + "bbox": [ + 67, + 241, + 291, + 323 + ], + "type": "text", + "content": "Overall, these ablation results confirm that all three enhancements—length-dependent accuracy, difficulty-aware advantage reweighting, and explicit penalties—collectively reduce morbidity, strengthen mathematical skills on harder questions, and elevate precision in final predictions." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 333, + 224, + 346 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 333, + 224, + 346 + ], + "spans": [ + { + "bbox": [ + 67, + 333, + 224, + 346 + ], + "type": "text", + "content": "5.3 Comparison with Baselines" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 351, + 291, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 351, + 291, + 446 + ], + "spans": [ + { + "bbox": [ + 67, + 351, + 291, + 446 + ], + "type": "text", + "content": "We next evaluate GRPO-LEAD at the 14B scale and compare it against two strong baselines under a 14k-token generation budget: DeepSeek-14B and the state-of-the-art Light-R1-14B-DS. Table 2 presents results on AIME24 and AIME25, including both our intermediate model (LEAD-stage1) and our final model (LEAD-stage2)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 454, + 291, + 618 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 454, + 291, + 618 + ], + "spans": [ + { + "bbox": [ + 67, + 454, + 291, + 618 + ], + "type": "text", + "content": "AIME24 Performance LEAD-stage1 achieves a Cons@32 of 0.833, matching Light-R1-14B-DS and exceeding DeepSeek-14B by " + }, + { + "bbox": [ + 67, + 454, + 291, + 618 + ], + "type": "inline_equation", + "content": "4.1\\%" + }, + { + "bbox": [ + 67, + 454, + 291, + 618 + ], + "type": "text", + "content": ". Its Pass@1 outperforms DeepSeek-14B by " + }, + { + "bbox": [ + 67, + 454, + 291, + 618 + ], + "type": "inline_equation", + "content": "2.4\\%" + }, + { + "bbox": [ + 67, + 454, + 291, + 618 + ], + "type": "text", + "content": " and closely approaches Light-R1-14B-DS. Crucially, LEAD-stage1 produces more concise responses than both baselines, with more than 800 tokens less on average. Building on these gains, LEAD-stage2 pushes performance further, delivering the highest Cons@32 (4% above Light-R1-14B-DS) and the best Pass@1, while reducing average solution length to 8,267 tokens." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "text", + "content": "AIME25 Performance LEAD-stage1 yields a Cons@32 of 0.767, matching Light-R1-14B-DS and exceeding DeepSeek-14B by " + }, + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "inline_equation", + "content": "21.2\\%" + }, + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "text", + "content": ". Its Pass@1 (0.523) outperforms DeepSeek-14B by " + }, + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "inline_equation", + "content": "21.9\\%" + }, + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "text", + "content": " and Light-R1-14B-DS by " + }, + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "inline_equation", + "content": "3.6\\%" + }, + { + "bbox": [ + 67, + 626, + 291, + 775 + ], + "type": "text", + "content": ". Crucially, LEAD-stage1 produces more concise responses than both baselines, with its solutions averaging 9,371 tokens. Building on these gains, LEAD-stage2 pushes performance further, delivering the highest Cons@32 (matching Light-R1-14B-DS at 0.767) and the best Pass@1 (0.539), while reducing" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 205, + 480, + 218 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 205, + 480, + 218 + ], + "spans": [ + { + "bbox": [ + 302, + 205, + 480, + 218 + ], + "type": "text", + "content": "average solution length to 8,668 tokens." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 218, + 526, + 380 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 218, + 526, + 380 + ], + "spans": [ + { + "bbox": [ + 302, + 218, + 526, + 380 + ], + "type": "text", + "content": "Overall, both LEAD-stage1 and LEAD-stage2 deliver substantial improvements over DeepSeek-14B and Light-R1-14B-DS, simultaneously boosting correctness and conciseness under a constrained (14k-token) budget. Remarkably, training LEAD-stage1 for just 100 steps—requiring only about 24 hours on eight H20 GPUs—already matches Light-R1-14B-DS on Cons@32 and outperforms it on AIME25 Pass@1 while producing shorter solutions, underscoring the practical efficiency of GRPO-LEAD for large-scale math problem-solving." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 391, + 381, + 403 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 391, + 381, + 403 + ], + "spans": [ + { + "bbox": [ + 302, + 391, + 381, + 403 + ], + "type": "text", + "content": "6 Conclusion" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 412, + 526, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 412, + 526, + 560 + ], + "spans": [ + { + "bbox": [ + 302, + 412, + 526, + 560 + ], + "type": "text", + "content": "We introduced GRPO-LEAD, a reinforcement learning framework designed for mathematical reasoning tasks. By extending Group Relative Policy Optimization with three major components—(1) a length-dependent accuracy reward to discourage overly verbose solutions, (2) an explicit negative penalty that clarifies the boundary between correct and incorrect answers, and (3) a difficulty-aware advantage reweighting scheme to prioritize tougher problems—GRPO-LEAD addresses key challenges in structured problem-solving." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 561, + 526, + 751 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 561, + 526, + 751 + ], + "spans": [ + { + "bbox": [ + 302, + 561, + 526, + 751 + ], + "type": "text", + "content": "Empirical evaluations on two AIME benchmarks show that GRPO-LEAD not only speeds up convergence but also strengthens the model's reasoning capability while keeping solution paths concise. Our 14B-scale experiments further confirm that GRPO-LEAD achieves state-of-the-art performance by balancing output brevity with high problem-solving accuracy. Although open questions remain—particularly in managing partial correctness and extending these techniques to broader domains—our findings suggest that reward shaping and difficulty modeling are pivotal in developing more robust and aligned language models for complex mathematical reasoning." + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 70, + 148, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 70, + 148, + 84 + ], + "spans": [ + { + "bbox": [ + 69, + 70, + 148, + 84 + ], + "type": "text", + "content": "7 Limitations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 92, + 290, + 213 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 92, + 290, + 213 + ], + "spans": [ + { + "bbox": [ + 69, + 92, + 290, + 213 + ], + "type": "text", + "content": "Although our techniques for encouraging concise solutions and difficulty-balanced learning may transfer to other domains, the gains reported here are specific to mathematical reasoning tasks. Further studies are needed to evaluate the effectiveness of GRPO-LEAD on broader question-answering or logical reasoning domains, where correctness signals and domain structures can differ substantially." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 215, + 290, + 443 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 215, + 290, + 443 + ], + "spans": [ + { + "bbox": [ + 69, + 215, + 290, + 443 + ], + "type": "text", + "content": "Additionally, we only have access to a limited amount of compute, which prevents us from conducting more comprehensive experiments. For instance, we currently cannot provide the validation curve for the 7B model in the ablation study that adds an explicit penalty. This is due to an error in the validation code after upgrading to the newest VERL version, and we currently do not have the compute to reproduce it. A comparison with the original GRPO model is also missing, except for the curve shown in Figure 2, because the checkpoint was stored on a rented server that was automatically released as we were writing the paper. We also couldn't formally perform a hyperparameter search to showcase the rationale behind choosing the hyperparameters for our designed modifications." 
+ } + ] + } + ], + "index": 2 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 71, + 127, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 71, + 127, + 83 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 127, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 90, + 289, + 774 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 69, + 90, + 289, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 90, + 289, + 134 + ], + "spans": [ + { + "bbox": [ + 69, + 90, + 289, + 134 + ], + "type": "text", + "content": "Pranjal Aggarwal and Sean Welleck. 2025. L1: Controlling how long a reasoning model thinks with reinforcement learning. arXiv preprint arXiv:2503.04697." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 142, + 289, + 188 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 142, + 289, + 188 + ], + "spans": [ + { + "bbox": [ + 69, + 142, + 289, + 188 + ], + "type": "text", + "content": "Xiangxiang Chu, Hailang Huang, Xiao Zhang, Fei Wei, and Yong Wang. 2025. Gpg: A simple and strong reinforcement learning baseline for model reasoning. arXiv preprint arXiv:2504.02546." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 195, + 289, + 230 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 195, + 289, + 230 + ], + "spans": [ + { + "bbox": [ + 69, + 195, + 289, + 230 + ], + "type": "text", + "content": "Muzhi Dai, Chenxu Yang, and Qingyi Si. 2025. S-grpo: Early exit via reinforcement learning in reasoning models. arXiv preprint arXiv:2505.07686." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 238, + 289, + 282 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 238, + 289, + 282 + ], + "spans": [ + { + "bbox": [ + 69, + 238, + 289, + 282 + ], + "type": "text", + "content": "Tom Everitt, Victoria Krakovna, Laurent Orseau, Marcus Hutter, and Shane Legg. 2017. Reinforcement learning with a corrupted reward channel. arXiv preprint arXiv:1705.08417." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 290, + 289, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 290, + 289, + 401 + ], + "spans": [ + { + "bbox": [ + 69, + 290, + 289, + 401 + ], + "type": "text", + "content": "Simon Frieder, Sam Bealing, Armenii Nikolaiev, Geoff C. Smith, Kevin Buzzard, Timothy Gowers, Peter J. Liu, Po-Shen Loh, Lester Mackey, Leonardo de Moura, Dan Roberts, D. Sculley, Terence Tao, David Balduzzi, Simon Coyle, Alex Gerko, Ryan Holbrook, Addison Howard, and XTX Markets. 2024. Ai mathematical olympiad - progress prize 2. https://kaggle.com/competitions/ ai-mathematical-olympiad-progress-prize-2. Kaggle." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 409, + 289, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 409, + 289, + 454 + ], + "spans": [ + { + "bbox": [ + 69, + 409, + 289, + 454 + ], + "type": "text", + "content": "Leo Gao, John Schulman, and Jacob Hilton. 2023. Scaling laws for reward model overoptimization. In International Conference on Machine Learning, pages 10835-10866. PMLR." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 462, + 289, + 529 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 462, + 289, + 529 + ], + "spans": [ + { + "bbox": [ + 69, + 462, + 289, + 529 + ], + "type": "text", + "content": "Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, and 1 others. 2025. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 538, + 289, + 593 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 538, + 289, + 593 + ], + "spans": [ + { + "bbox": [ + 69, + 538, + 289, + 593 + ], + "type": "text", + "content": "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xi-angyu Zhang, and Heung-Yeung Shum. 2025a. Open reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 601, + 289, + 657 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 601, + 289, + 657 + ], + "spans": [ + { + "bbox": [ + 69, + 601, + 289, + 657 + ], + "type": "text", + "content": "Jingcheng Hu, Yinmin Zhang, Qi Han, Daxin Jiang, Xiangyu Zhang, and Heung-Yeung Shum. 2025b. Open-reasoner-zero: An open source approach to scaling up reinforcement learning on the base model. arXiv preprint arXiv:2503.24290." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 665, + 289, + 710 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 665, + 289, + 710 + ], + "spans": [ + { + "bbox": [ + 69, + 665, + 289, + 710 + ], + "type": "text", + "content": "Xuying Li, Zhuo Li, Yuji Kosuga, and Victor Bian. 2025. Optimizing safe and aligned language generation: A multi-objective grpo approach. arXiv preprint arXiv:2503.21819." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 718, + 289, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 718, + 289, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 718, + 289, + 774 + ], + "type": "text", + "content": "Jie Liu, Gongye Liu, Jiajun Liang, Yangguang Li, Jiaheng Liu, Xintao Wang, Pengfei Wan, Di Zhang, and Wanli Ouyang. 2025a. Flow-grpo: Training flow matching models via online rl. arXiv preprint arXiv:2505.05470." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 774 + ], + "type": "list", + "angle": 0, + "index": 25, + "blocks": [ + { + "bbox": [ + 304, + 72, + 525, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 525, + 117 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 525, + 117 + ], + "type": "text", + "content": "Zichen Liu, Changyu Chen, Wenjun Li, Penghui Qi, Tianyu Pang, Chao Du, Wee Sun Lee, and Min Lin. 2025b. Understanding r1-zero-like training: A critical perspective. arXiv preprint arXiv:2503.20783." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 127, + 525, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 127, + 525, + 195 + ], + "spans": [ + { + "bbox": [ + 304, + 127, + 525, + 195 + ], + "type": "text", + "content": "Michael Luo, Sijun Tan, Roy Huang, Ameen Patel, Alpay Ariyak, Qingyang Wu, Xiaoxiang Shi, Rachel Xin, Colin Cai, Maurice Weber, Ce Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. 2025a. 
Deepcoder: A fully open-source 14b coder at o3-mini level. . Notion Blog." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 206, + 525, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 206, + 525, + 272 + ], + "spans": [ + { + "bbox": [ + 304, + 206, + 525, + 272 + ], + "type": "text", + "content": "Michael Luo, Sijun Tan, Justin Wong, Xiaoxiang Shi, William Y. Tang, Manan Roongta, Colin Cai, Jeffrey Luo, Tianjun Zhang, Li Erran Li, Raluca Ada Popa, and Ion Stoica. 2025b. Deepscaler: Surpassing o1-preview with a 1.5b model by scaling rl. . Notion Blog." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 283, + 525, + 349 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 283, + 525, + 349 + ], + "spans": [ + { + "bbox": [ + 304, + 283, + 525, + 349 + ], + "type": "text", + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 361, + 525, + 406 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 361, + 525, + 406 + ], + "spans": [ + { + "bbox": [ + 304, + 361, + 525, + 406 + ], + "type": "text", + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 417, + 525, + 483 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 417, + 525, + 483 + ], + "spans": [ + { + "bbox": [ + 304, + 417, + 525, + 483 + ], + "type": "text", + "content": "Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, and 1 others. 2024. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 494, + 525, + 550 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 494, + 525, + 550 + ], + "spans": [ + { + "bbox": [ + 304, + 494, + 525, + 550 + ], + "type": "text", + "content": "Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. 2024. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 560, + 521, + 573 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 560, + 521, + 573 + ], + "spans": [ + { + "bbox": [ + 304, + 560, + 521, + 573 + ], + "type": "text", + "content": "Zhiqing Sun. 2024. Aimo-cmu/math/cmu/math-aimo." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 584, + 525, + 640 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 584, + 525, + 640 + ], + "spans": [ + { + "bbox": [ + 304, + 584, + 525, + 640 + ], + "type": "text", + "content": "Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, and 1 others. 2025. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 650, + 525, + 673 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 650, + 525, + 673 + ], + "spans": [ + { + "bbox": [ + 304, + 650, + 525, + 673 + ], + "type": "text", + "content": "Qwen Team. 2025. Qwq-32b: Embracing the power of reinforcement learning." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 684, + 525, + 740 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 684, + 525, + 740 + ], + "spans": [ + { + "bbox": [ + 304, + 684, + 525, + 740 + ], + "type": "text", + "content": "Liang Wen, Yunke Cai, Fenrui Xiao, Xin He, Qi An, Zhenyu Duan, Yimin Du, Junchen Liu, Lifu Tang, Xiaowei Lv, and 1 others. 2025. Light-r1: Curriculum sft, dpo and r1 for long cot from scratch and beyond. arXiv preprint arXiv:2503.10460." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 304, + 751, + 525, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 751, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 751, + 525, + 774 + ], + "type": "text", + "content": "Lilian Weng. 2024. Reward hacking in reinforcement learning. _lianweng.github.io_." + } + ] + } + ], + "index": 24 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 191 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "text", + "content": "An Yang, Beichen Zhang, Binyuan Hui, Bofei Gao, Bowen Yu, Chengpeng Li, Dayiheng Liu, Jianhong Tu, Jingren Zhou, Junyang Lin, and 1 others. 2024. Qwen2. 5-math technical report: Toward mathematical expert model via self-improvement. arXiv preprint arXiv:2409.12122." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 147, + 291, + 191 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 147, + 291, + 191 + ], + "spans": [ + { + "bbox": [ + 69, + 147, + 291, + 191 + ], + "type": "text", + "content": "Edward Yeo, Yuxuan Tong, Morry Niu, Graham Neubig, and Xiang Yue. 2025. Demystifying long chain-of-thought reasoning in llms. arXiv preprint arXiv:2502.03373." + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 68, + 241, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 68, + 241, + 83 + ], + "spans": [ + { + "bbox": [ + 70, + 68, + 241, + 83 + ], + "type": "text", + "content": "A Evaluations on Coding Tasks" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 91, + 290, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 91, + 290, + 185 + ], + "spans": [ + { + "bbox": [ + 67, + 91, + 290, + 185 + ], + "type": "text", + "content": "We evaluate our proposed LEAD-14B model against the original DeepSeek-R1-Distill-Qwen-14B baseline on the LiveCodeBench benchmark under a maximum sequence length of 8k tokens. The dataset version used is release_v5, consisting of 880 code generation tasks. Results are summarized in Table 3." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 188, + 291, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 188, + 291, + 390 + ], + "spans": [ + { + "bbox": [ + 69, + 188, + 291, + 390 + ], + "type": "text", + "content": "As shown above, LEAD-14B achieves higher accuracy (0.5156 vs. 0.5103) while producing slightly longer completions. This suggests that our method enhances reasoning capability in code generation. Regarding the observed increase in chain-of-thought (CoT) length, we hypothesize that this effect arises because our training focused exclusively on mathematical reasoning datasets. While our method compresses reasoning paths in math domains, such compression does not appear to generalize as effectively to code. Combined with the improved reasoning capability that may increase the overall reasoning path, this may explain why generated sequences are overall longer in coderelated tasks." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 401, + 258, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 401, + 258, + 428 + ], + "spans": [ + { + "bbox": [ + 68, + 401, + 258, + 428 + ], + "type": "text", + "content": "B Detailed Analysis on AIME25 by Difficulty" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 436, + 290, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 436, + 290, + 516 + ], + "spans": [ + { + "bbox": [ + 67, + 436, + 290, + 516 + ], + "type": "text", + "content": "To further analyze model performance, we stratified the AIME25 dataset into three difficulty levels based on the problem number: normal (problems 1-5), difficult (problems 6-10), and highly difficult (problems 11-15). The detailed evaluation results for each stratum are presented in Table 4." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 518, + 290, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 518, + 290, + 666 + ], + "spans": [ + { + "bbox": [ + 67, + 518, + 290, + 666 + ], + "type": "text", + "content": "The stratified results in Table 4 support the hypothesis that advantage reweighting enhances a model's ability to solve more difficult problems. This is evidenced by the widening performance gap in Pass@1 between GRPO+L and GRPO+LAD as problem difficulty increases. For normal problems, GRPO+LAD offers a modest " + }, + { + "bbox": [ + 67, + 518, + 290, + 666 + ], + "type": "inline_equation", + "content": "1.95\\%" + }, + { + "bbox": [ + 67, + 518, + 290, + 666 + ], + "type": "text", + "content": " improvement over GRPO+L. This margin increases substantially to " + }, + { + "bbox": [ + 67, + 518, + 290, + 666 + ], + "type": "inline_equation", + "content": "13.7\\%" + }, + { + "bbox": [ + 67, + 518, + 290, + 666 + ], + "type": "text", + "content": " for difficult problems, indicating that the benefits of advantage reweighting are more pronounced in challenging scenarios." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 667, + 290, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 667, + 290, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 667, + 290, + 775 + ], + "type": "text", + "content": "For highly difficult problems, the Pass@1 scores for GRPO+L and GRPO+LAD are identical. Neither method incorporates an explicit penalty for incorrect answers, making them susceptible to generating numerous wrong solutions. 
This tendency leads to unstable majority voting-based accuracy (Cons@32), a vulnerability that is magnified by the intrinsic difficulty of the problems." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 69, + 525, + 259 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 69, + 525, + 259 + ], + "spans": [ + { + "bbox": [ + 302, + 69, + 525, + 259 + ], + "type": "text", + "content": "In contrast, the introduction of an explicit penalty in GRPO+LEAD demonstrates a clear regularization effect. On the most difficult problem set, GRPO+LEAD achieves the highest accuracy (Cons@32 of 0.4) and more than doubles the precision of both GRPO+L (0.172) and GRPO+LAD (0.156); the number of correct answers generated by GRPO+LEAD is comparable to both GRPO+L and GRPO+LAD, despite generating much fewer total answers. This validates our hypothesis that the explicit penalty effectively \"regularizes thinking\", discouraging the kind of hasty and incorrect responses that the length reward tends to encourage otherwise." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 303, + 270, + 489, + 297 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 270, + 489, + 297 + ], + "spans": [ + { + "bbox": [ + 303, + 270, + 489, + 297 + ], + "type": "text", + "content": "C Qualitative Analysis of Solution Conciseness" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 306, + 525, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 306, + 525, + 400 + ], + "spans": [ + { + "bbox": [ + 302, + 306, + 525, + 400 + ], + "type": "text", + "content": "To provide a qualitative illustration of how the length reward enhances conciseness, we contrast the shortest correct solutions generated by GRPO+L and the baseline Deepseek-7B for the same problem (Problem 3, AIME 25 I). Table 5 breaks down the comparison across key aspects of readability and reasoning structure." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 401, + 525, + 535 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 401, + 525, + 535 + ], + "spans": [ + { + "bbox": [ + 302, + 401, + 525, + 535 + ], + "type": "text", + "content": "As the comparison highlights, the GRPO+L model produces a tight, step-by-step solution that remains focused, avoids repetition, and concludes efficiently. In contrast, the Deepseek-7B baseline's reasoning path is less direct, characterized by repeated self-checks and conversational digressions that nearly double the total length and reduce clarity. This case study demonstrates that our length-reward mechanism successfully encourages a more disciplined and economical reasoning style." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 72, + 71, + 520, + 125 + ], + "blocks": [ + { + "bbox": [ + 72, + 71, + 520, + 125 + ], + "lines": [ + { + "bbox": [ + 72, + 71, + 520, + 125 + ], + "spans": [ + { + "bbox": [ + 72, + 71, + 520, + 125 + ], + "type": "table", + "html": "
ModelAccuracyAvg. Tokens (Overall)EasyMediumHard
LEAD-14B0.51566322399869128000
DeepSeek-R1-Distill-Qwen-14B0.51035794304664297856
", + "image_path": "8e247d0c876304cde98fffbdce30b4e7b15bb0ca2c04c96aaa8a2c38f227604e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 103, + 169, + 490, + 427 + ], + "blocks": [ + { + "bbox": [ + 67, + 133, + 524, + 158 + ], + "lines": [ + { + "bbox": [ + 67, + 133, + 524, + 158 + ], + "spans": [ + { + "bbox": [ + 67, + 133, + 524, + 158 + ], + "type": "text", + "content": "Table 3: Performance on LiveCodeBench (release_v5) with maximum sequence length of 8k tokens. All token counts are rounded to the nearest integer." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 103, + 169, + 490, + 427 + ], + "lines": [ + { + "bbox": [ + 103, + 169, + 490, + 427 + ], + "spans": [ + { + "bbox": [ + 103, + 169, + 490, + 427 + ], + "type": "table", + "html": "
ModelCons@32Avg. CorrectAvg. AnswerPrecisionPass@1
Normal Problems (1–5)
Deepseek-7B0.818.820.30.7080.588
GRPO + L0.819.727.60.6310.616
GRPO + LAD0.920.126.90.6870.628
GRPO + LEAD0.822.024.50.7230.688
Difficult Problems (6–10)
Deepseek-7B0.48.313.80.4040.259
GRPO + L0.58.624.10.4120.269
GRPO + LAD0.69.824.20.4480.306
GRPO + LEAD0.69.720.00.4210.303
Highly Difficult Problems (11–15)
Deepseek-7B0.20.92.00.2300.028
GRPO + L0.31.313.90.1720.041
GRPO + LAD0.21.314.60.1560.041
GRPO + LEAD0.41.57.70.3550.047
", + "image_path": "478b1b6e2f5432a216d7f533b5b55e9c849b458bb79fec9b9d88a6449e47522a.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 73, + 482, + 520, + 736 + ], + "blocks": [ + { + "bbox": [ + 67, + 435, + 525, + 471 + ], + "lines": [ + { + "bbox": [ + 67, + 435, + 525, + 471 + ], + "spans": [ + { + "bbox": [ + 67, + 435, + 525, + 471 + ], + "type": "text", + "content": "Table 4: Consolidated evaluation results on AIME25, stratified by problem difficulty. Avg. Answers refers to the number of outputs that have completed within the 8k token budget and produce some, where as Avg. Correct refers to the correct answers. Precision is Avg. Correct/ Avg. Answers." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 73, + 482, + 520, + 736 + ], + "lines": [ + { + "bbox": [ + 73, + 482, + 520, + 736 + ], + "spans": [ + { + "bbox": [ + 73, + 482, + 520, + 736 + ], + "type": "table", + "html": "
AspectGRPO+LDeepseek-7B
Structure & flow“Step 1: enumerate all possible triples ... Step 2: compute the multinomial coefficient ... Step 3: sum and mod.”“Okay ... let me parse this step by step ... but wait, hold on ... let me verify the triples again ...”
Redundancy“...hence, all possible triples: (6, 2, 1), (5, 3, 1), (4, 3, 2).”“So, the possible triples ... So, three triples in total ... Wait, hold on, let me check if there are more ... So, total three triples.”
Conciseness of language“Total N = 2016. Therefore, the remainder is 16.”“Wait, hold on a second. ... Maybe I can think of all possible partitions ... No, I think the only possible triples are the three we found.”
Logical signposting“Case 1: s = 1 ... Case 2: s = 2 ... Case 3: s = 3 (no solutions).”“Case 1: S = 1 ... Subcase 1a ... Subcase 1b ... (digression) ... Case 3: S = 3 ... no solutions ... (returns to earlier cases).”
Error-checking“Only three possible triples, so the computation is complete.”“Wait, hold on a second. Is that all? ... let me verify the triples again ... maybe there are other triples?”
LengthEntire solution ≈ 200 words.Entire solution ≈ 370 words (many repeated sentences such as “So, I think 16 is the answer”).
", + "image_path": "bb21394a055c4e6334099714496cb564eb5a23dcb6c328d6057f393534a6b2f9.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 745, + 525, + 770 + ], + "lines": [ + { + "bbox": [ + 67, + 745, + 525, + 770 + ], + "spans": [ + { + "bbox": [ + 67, + 745, + 525, + 770 + ], + "type": "text", + "content": "Table 5: Qualitative comparison of the shortest correct rollouts from GRPO+L and Deepseek-7B for AIME 25 I, Problem 3. Italicized text in the Deepseek-7B column represents meta-commentary or self-correction loops." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_content_list.json b/data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..3995d69fe34cb690456c727676d961761525a8cd --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_content_list.json @@ -0,0 +1,1875 @@ +[ + { + "type": "text", + "text": "DUMP: Automated Distribution-Level Curriculum Learning for RL-based LLM Post-training", + "text_level": 1, + "bbox": [ + 187, + 122, + 810, + 174 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Zhenting Wang", + "bbox": [ + 192, + 224, + 310, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Guofeng Cui", + "bbox": [ + 338, + 224, + 437, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Yu-Jhe Li", + "bbox": [ + 465, + 224, + 549, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Kun Wan\\*", + "bbox": [ + 575, + 224, + 658, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Wentian Zhao\\*", + "bbox": [ + 687, + 224, + 802, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Rutgers University $^{2}$ Adobe Inc.", + "bbox": [ + 383, + 260, + 612, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 459, + 311, + 537, + 328 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Recent advances in reinforcement learning (RL)-based post-training have led to notable improvements in large language models (LLMs), particularly in enhancing their reasoning capabilities to handle complex tasks. However, most existing methods treat the training data as a unified whole, overlooking the fact that modern LLM training often involves a mixture of data from diverse distributions—varying in both source and difficulty. This heterogeneity introduces a key challenge: how to adaptively schedule training across distributions to optimize learning efficiency. In this paper, we present a principled curriculum learning framework grounded in the notion of distribution-level learnability. Our core insight is that the magnitude of policy advantages reflects how much a model can still benefit from further training on a given distribution. Based on this, we propose a distribution-level curriculum learning framework for RL-based LLM post-training, which leverages the Upper Confidence Bound (UCB) principle to dynamically adjust sampling probabilities for different distributions. 
This approach prioritizes distributions with either high average advantage (exploitation) or low sample count (exploration), yielding an adaptive and theoretically grounded training schedule. We instantiate our curriculum learning framework with GRPO as the underlying RL algorithm and demonstrate its effectiveness on logic reasoning datasets with multiple difficulties and sources. Our experiments show that our framework significantly improves convergence speed and final performance, highlighting the value of distribution-aware curriculum strategies in LLM post-training. Code: https://github.com/ZhentingWang/DUMP.", + "bbox": [ + 228, + 342, + 767, + 647 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 171, + 671, + 313, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Reinforcement learning (RL)-based post-training has emerged as a powerful approach for enhancing the capabilities of large language models (LLMs), particularly in areas requiring structured reasoning, multi-step inference, and task-specific generalization [1-4]. By leveraging reward signals derived from task performance, human feedback, or domain-specific metrics, RL provides a flexible alternative to supervised fine-tuning. Unlike imitation-based methods that merely mimic reference outputs, RL-based approaches allow models to optimize directly toward behavioral objectives, making them especially effective for boosting model performance on complex reasoning and agentic tasks.", + "bbox": [ + 169, + 700, + 826, + 800 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "While RL-based post-training has become a key technique for enhancing LLM capabilities in reasoning, alignment, and coding, one foundational challenge remains underexplored: how to dynamically schedule training across heterogeneous data distributions. In practice, LLMs are post-trained on datasets drawn from a wide variety of sources—ranging from factual QA to math problems and coding tasks—each differing in knowledge/capability relevance, and learning difficulty [5-7]. This heterogeneity is evident in large-scale post-training datasets such as Tulu 3 [7], where prompts span general dialogue, logic puzzles, STEM problems, and multilingual instructions, with", + "bbox": [ + 169, + 804, + 826, + 905 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09710v3 [cs.LG] 11 Oct 2025", + "bbox": [ + 22, + 277, + 57, + 717 + ], + "page_idx": 0 + }, + { + "type": "footer", + "text": "Preprint.", + "bbox": [ + 171, + 922, + 227, + 936 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "widely varying counts, formats, and alignment objectives. More recently, next-generation post-training pipelines (e.g., Seed-Thinking v1.5 [8]) have shifted toward synthetic data generation with controllable parameters—e.g., configuring logical puzzle difficulty. This allows fine-grained control over the data distribution, making distribution-level curriculum learning both feasible and increasingly important. Despite this, most RL-based pipelines still treat all data distributions equally—uniformly sampling tasks throughout training or relying on static, hand-designed curricula. This static treatment ignores the model's evolving learning needs and underutilizes the training budget. Moreover, it is difficult to handcraft effective curricula when the post-training data comes from multiple distributions lacking clear difficulty labels. 
As reinforcement learning becomes increasingly used in post-training and training costs continue to rise, a data-driven curriculum mechanism that dynamically prioritizes learnable distributions is not just desirable, but necessary.", + "bbox": [ + 169, + 90, + 826, + 243 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "This motivates the need for automated distribution-level curriculum learning: a dynamic strategy that adjusts sampling probabilities across data distributions throughout training. While prior work has explored instance-level curricula based on sample difficulty [9], and static/heuristic multi-stage schedules have been applied in LLM post-training [10, 11], little attention has been paid to automated, distribution-level scheduling—especially in the context of RL for capability-oriented post-training. The central challenge lies in identifying signals that reflect the current learnability of each distribution and in designing algorithms that can stably and efficiently leverage these signals to guide sampling.", + "bbox": [ + 169, + 250, + 826, + 348 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this paper, we present DUMP (Automated Distribution-level cUrriculumM learning for RL-based LLM Post-training), a simple but theoretically grounded approach to address this challenge. Our central insight is that the magnitude of policy advantages—the expected absolute difference between a model's predicted return and its baseline value—serves as a natural proxy for distribution-level learnability. High advantages on specific data distribution indicate underfitting and high potential for improvement on it, while low advantages suggest diminishing returns. Moreover, the statistical reliability of these advantage estimates improves with the number of samples drawn from each distribution. DUMP operationalizes this insight by using bandit-style Upper Confidence Bound (UCB) scores to schedule distribution sampling. It maintains a sliding window of recent advantage magnitudes for each distribution and computes a score that balances exploitation (high advantage) and exploration (low visitation). These scores are normalized via a softmax to form sampling weights, which are then used to generate training batches. Unlike fixed or heuristic curricula, DUMP adapts throughout training based on empirical signals, and can be seamlessly integrated into standard LLM RL pipelines. We instantiate DUMP with GRPO [3], but the method is compatible with any advantage-based RL algorithm. We evaluate DUMP on logic reasoning corpora. Our experiments show that DUMP significantly accelerates convergence and yields stronger performance compared to uniform sampling. Furthermore, we provide theoretical analysis that supports the use of absolute advantages as a surrogate for distribution-level learnability, formalizing its connection to sample efficiency and regret minimization.", + "bbox": [ + 169, + 353, + 826, + 616 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "We summarize our contributions as follows. ① We highlight the underexplored challenge of curriculum learning at the distribution level for RL-based post-training aimed at capability enhancement. ② We propose DUMP, a theoretically grounded framework that leverages advantage-based UCB scores to adaptively guide training over data distributions. 
③ We demonstrate DUMP's effectiveness through empirical results and theoretical analysis, showing that it enables faster, more efficient improvement on LLM capabilities.", + "bbox": [ + 169, + 622, + 828, + 705 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Background", + "text_level": 1, + "bbox": [ + 171, + 720, + 310, + 736 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "RL-based LLM Post-training. Reinforcement learning (RL) plays a central role in post-training large language models (LLMs), especially for tasks involving reasoning, subjective preference, or long-horizon control. The RLHF framework [1, 12-15] laid the foundation by aligning models using reward signals derived from human preferences. Beyond preference alignment, recent RL-based post-training approaches have notably enhanced LLMs' capabilities in complex reasoning tasks, particularly coding and mathematics. For instance, RL post-trained model OpenAI o1 [16], o3 [17, 18], DeepSeek-R1 [4] significantly outperform LLMs without RL post-training such as pre-trained versions of GPT-4o [19] and DeepSeek-V3 [20] on challenging mathematics and coding benchmarks (e.g., AIME [21] and Codeforces [22]). Proximal Policy Optimization (PPO) [23] is widely used in post-training due to its clipped objective, which stabilizes training by preventing large policy updates. PPO remains a strong baseline in many LLM alignment settings. Direct Preference Optimization (DPO) [2] simplifies the pipeline by replacing RL rollouts with a classification-style loss", + "bbox": [ + 169, + 744, + 828, + 912 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "derived from a KL-constrained reward maximization objective. While DPO works well on pairwise preference data, it does not naturally support group-wise or comparative feedback. Group Relative Policy Optimization (GRPO) [3] addresses this limitation by leveraging group-based feedback. For each input prompt $x$ , GRPO samples a group of $G$ candidate outputs $\\{o_1, \\ldots, o_G\\} \\sim \\pi_{\\mathrm{ref}}(\\cdot | x)$ from a frozen reference policy $\\pi_{\\mathrm{ref}}$ . Each output $o_i$ is assigned a reward $r_i$ , and the advantage of $o_i$ is computed by normalizing its reward relative to others in the group:", + "bbox": [ + 169, + 90, + 823, + 175 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{r _ {1} , \\dots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(\\left\\{r _ {1} , \\dots , r _ {G} \\right\\}\\right) + \\epsilon}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 387, + 184, + 825, + 217 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\epsilon > 0$ is a small constant for numerical stability. These normalized advantages capture the relative quality of outputs within the group. 
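To make the group-relative advantage in Eq. 1 concrete, it can be sketched in a few lines of NumPy; the function name and the example reward values below are illustrative and not taken from the paper.

import numpy as np

def group_relative_advantages(rewards, eps=1e-6):
    # Eq. 1: normalize each reward against the mean and std of its own group of G sampled outputs.
    rewards = np.asarray(rewards, dtype=float)
    return (rewards - rewards.mean()) / (rewards.std() + eps)

# One prompt with G = 4 sampled outputs; higher-reward outputs receive positive advantages.
print(group_relative_advantages([2.0, -1.5, 2.0, -2.0]))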
The model policy $\\pi_{\\theta}$ is then updated by maximizing the following clipped surrogate objective:", + "bbox": [ + 169, + 224, + 823, + 266 + ], + "page_idx": 2 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {J} _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {x, \\left\\{o _ {i} \\right\\}} \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\min \\left(\\frac {\\pi_ {\\theta} \\left(o _ {i} \\mid x\\right)}{\\pi_ {\\mathrm {o l d}} \\left(o _ {i} \\mid x\\right)} \\hat {A} _ {i}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} \\left(o _ {i} \\mid x\\right)}{\\pi_ {\\mathrm {o l d}} \\left(o _ {i} \\mid x\\right)}, 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i}\\right) - \\beta \\mathbb {D} _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right) \\right], \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 173, + 273, + 823, + 323 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "where $\\pi_{\\theta}(o_i|x)$ is the probability assigned by the current model to output $o_i$ , $\\pi_{\\mathrm{old}}(o_i|x)$ is the same under the model from previous step, and $\\pi_{\\mathrm{ref}}(o_i|x)$ is that under the reference model. The first term inside the summation is a clipped policy ratio scaled by $\\hat{A}_i$ , similar to PPO [23], which prevents overly large updates. The outer expectation is taken over prompts $x$ and their sampled output groups $\\{o_i\\}$ . The second term is a KL divergence penalty that regularizes the updated policy $\\pi_{\\theta}$ to stay close to $\\pi_{\\mathrm{ref}}$ , weighted by a hyperparameter $\\beta$ . This formulation eliminates the need for an explicit value baseline and stabilizes training by comparing outputs within local groups.", + "bbox": [ + 169, + 323, + 823, + 422 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Curriculum Learning for RL. Curriculum learning [24, 25] organizes training by progressing from easy to hard examples. In RL, curricula often follow task complexity [26-28], or are learned via teacher-student frameworks modeled as partially observable Markov decision process [29, 30]. With the adoption of RL in LLM post-training, curriculum learning has shown potential for improving both training efficiency and model effectiveness. For example, Curri-DPO [9] constructs instance-level curricula by ranking preference pairs based on the score gap between preferred and dispreferred responses, introducing harder pairs gradually during DPO fine-tuning. Kimi k1.5 [10] and LogicRL [11], on the other hand, use manually defined heuristic curricula with fixed training stages, e.g., models are first trained on \"easy\" samples for a pre-specified number of steps, then switched to \"hard\" samples. These strategies rely on static schedules and heuristic difficulty labels, without adapting to the model's learning progress. While these works demonstrate the benefit of curriculum learning in LLM post-training, most existing approaches focus on instance-level difficulty or use static, manually designed strategies. In contrast, automatic curriculum learning at the distribution level, especially in RL-based post-training, remains underexplored. 
In this paper, we propose DUMP to fill this gap by adaptively scheduling training over distributions using advantage-based learnability signals.", + "bbox": [ + 169, + 429, + 826, + 637 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3 Method", + "text_level": 1, + "bbox": [ + 171, + 651, + 272, + 666 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "In this section, we introduce DUMP, a distribution-level curriculum learning framework for RL-based LLM post-training. We first introduce expected absolute advantage as a proxy for learnability, and formalize the scheduling problem as a multi-armed bandit. We then describe a UCB-based strategy to guide distribution selection, followed by the full implementation of DUMP.", + "bbox": [ + 169, + 676, + 826, + 733 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.1 Measuring Learnability via Absolute Advantage", + "text_level": 1, + "bbox": [ + 171, + 747, + 549, + 762 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "We aim to dynamically assess the usefulness of different data distributions during LLM reinforcement learning post-training. Intuitively, a distribution is more useful (or \"learnable\") if the model can gain more from training on its samples. To help understand and measure the learnability of the data samples from different distributions, we provide the following theorem:", + "bbox": [ + 169, + 768, + 823, + 825 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Theorem 3.1 (Expected Advantage Magnitude Reflects Learnability). Given a policy $\\pi_{\\theta}$ and a data distribution $d$ , the expected absolute advantage $\\mathbb{E}_{x \\sim d} \\left[ \\mathbb{E}_{o_i \\sim \\pi_{\\theta}(\\cdot | x)} \\left[ |\\hat{A}_i| \\right] \\right]$ serves as a proxy for how much that distribution $d$ can help the model improve, where the distribution $d$ consisting of prompts $x \\sim d$ , each prompt has a group of sampled outputs $\\{o_1, \\ldots, o_n\\}$ , and $\\hat{A}_i$ denotes the advantage of output $o_i$ .", + "bbox": [ + 169, + 830, + 826, + 912 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "The proof can be found in Appendix A. Intuitively, if training on a distribution results in a larger expected advantage magnitude, then that distribution is considered more learnable. The advantage function measures the deviation between an action's predicted value and its actual return; a large advantage—either positive or negative—indicates that the model's current policy is still far from optimal on those samples but has a large potential to improve. A small advantage magnitude does not necessarily imply mastery—it may also occur when a task is too difficult or noisy for the model to learn from effectively, resulting in weak or unstable learning signals. To capture this deviation in both directions, we take the absolute value of the advantage. Without this, positive and negative advantages within a batch may cancel out, masking the true extent of the model's uncertainty or suboptimality. By averaging the absolute advantage over multiple sampled outputs and prompts, we obtain a robust estimate of how much learning signal remains in a given distribution. This expected absolute advantage thus acts as a practical proxy for distribution-level learnability: it reflects how much the model can benefit from training on that distribution. 
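As a rough illustration of how this proxy can be estimated from statistics the rollout phase already produces, consider the sketch below; the helper name and the toy numbers are ours, not the paper's.

from collections import defaultdict
import numpy as np

def estimate_learnability(rollouts):
    # L(d): mean absolute advantage over recent rollouts, grouped by source distribution.
    abs_adv = defaultdict(list)
    for dist_id, advantage in rollouts:
        abs_adv[dist_id].append(abs(advantage))
    return {d: float(np.mean(values)) for d, values in abs_adv.items()}

# Toy example: the harder distribution still carries a much larger learning signal.
rollouts = [("kk_3_chars", 0.05), ("kk_3_chars", -0.10), ("kk_7_chars", 0.90), ("kk_7_chars", -0.75)]
print(estimate_learnability(rollouts))  # {'kk_3_chars': 0.075, 'kk_7_chars': 0.825}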
It also has the strength of being lightweight to compute in RL pipelines, as advantage estimates are already generated during rollout.", + "bbox": [ + 169, + 90, + 826, + 285 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "3.2 Formalizing Distribution-Level Curriculum Learning as Multi-armed Bandit", + "text_level": 1, + "bbox": [ + 169, + 297, + 748, + 311 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "We aim to design a curriculum learning strategy that dynamically allocates training focus across multiple data distributions to maximize overall model improvement. Let $\\mathcal{D} = \\{d_1, \\dots, d_N\\}$ be a set of data distributions. At each training step, we sample a batch of examples $\\mathcal{B}_t$ by drawing prompts from these distributions according to a learnable sampling policy, and use the batch to update model parameters $\\theta$ via reinforcement learning. The goal is to assign higher sampling probabilities to distributions that offer greater learning potential, thereby maximizing cumulative capability gain.", + "bbox": [ + 169, + 319, + 823, + 402 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As motivated in Theorem 3.1, we quantify the learning potential of a distribution $d$ via its expected absolute advantage, defined as $L(d) = \\mathbb{E}_{x\\sim d}\\left[\\mathbb{E}_{o\\sim \\pi_{\\theta}(\\cdot |x)}\\left[\\left|\\hat{A}(o)\\right|\\right]\\right]$ . Our objective is to dynamically adjust the sampling distribution over $\\mathcal{D}$ such that, over the training horizon $T$ , we approximately maximize the total expected learnability gain $\\sum_{t=1}^{T}\\mathbb{E}_{d\\sim P_t}[L(d)]$ , where $P_t$ is the sampling distribution at step $t$ . This setup resembles a multi-armed bandit (MAB) problem, where each distribution acts as an arm and its reward corresponds to its learnability. In this setting, the central challenge is to estimate and balance each distribution's potential: exploiting those with high observed advantage while still exploring under-sampled ones that may offer long-term benefit. To this end, we adopt the classic Upper Confidence Bound (UCB) principle [31], which provides theoretical guarantees for balancing exploration and exploitation in bandit problems. Specifically, UCB-based algorithms achieve sublinear regret compared to the optimal fixed-arm strategy, and we show in Appendix B that applying UCB on empirical advantage statistics yields a near-optimal schedule under mild assumptions. To allow smoother allocation of sampling probabilities without hard cutoffs and reducing variance in learning, we adopt a soft-selection mechanism: instead of choosing one distribution at each step, we compute a UCB score for every distribution and normalize the scores with a softmax function to obtain a sampling distribution. This soft-selection formulation preserves the spirit of UCB—higher scoring distributions are sampled more—but enables partial exploration of all arms, and it is easier to integrate into LLM training pipelines. The resulting sampling distribution provides a convex mixture over data sources, where each distribution $d_j$ is selected with probability. Each training batch is then composed by drawing examples from multiple distributions in proportion to their scores. 
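Composing such a mixed batch can be sketched as follows, assuming the per-distribution sampling probabilities have already been computed; function and variable names are illustrative.

import numpy as np

def compose_batch(prompt_pools, probs, batch_size, seed=0):
    # Draw each prompt's source distribution according to probs, then sample a prompt from that pool.
    rng = np.random.default_rng(seed)
    dist_ids = list(probs)
    weights = np.array([probs[d] for d in dist_ids], dtype=float)
    sources = rng.choice(dist_ids, size=batch_size, p=weights / weights.sum())
    return [rng.choice(prompt_pools[d]) for d in sources]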
To estimate learnability in practice, we maintain a sliding window $\\mathcal{A}_{d_j}^w$ of recent absolute advantages for each distribution $d_j$ , and define its empirical reward as the mean absolute advantage: $\\hat{L}(d_j) = \\frac{1}{|\\mathcal{A}_{d_j}^w|}\\sum_{a\\in \\mathcal{A}_{d_j}^w}|a|$ . We also track the total number of samples drawn from each distribution $n_{d_j}$ , and the global sample count $n_{\\mathrm{total}} = \\sum_{j}n_{d_j}$ . The UCB score for each distribution is:", + "bbox": [ + 169, + 409, + 826, + 767 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathrm {U C B} \\left(d _ {j}\\right) = \\hat {L} \\left(d _ {j}\\right) + \\sqrt {\\frac {2 \\log \\left(n _ {\\text {t o t a l}} + 1\\right)}{n _ {d _ {j}} + 1}} \\tag {3}\n$$\n", + "text_format": "latex", + "bbox": [ + 361, + 773, + 823, + 813 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The first term encourages exploitation of distributions with high observed advantages, while the second term ensures sufficient exploration of rarely sampled distributions. To obtain the final sampling weights, we apply a softmax over the UCB scores. Specifically, the probability of selecting distribution $d_{j}$ is computed as: $P(d_{j}) = \\frac{\\exp(\\mathrm{UCB}(d_{j}) / \\tau)}{\\sum_{j=1}^{N} \\exp(\\mathrm{UCB}(d_{j}) / \\tau)}$ , where $\\tau > 0$ is a temperature hyperparameter that controls the sharpness of the sampling distribution. A lower $\\tau$ results in more peaked selection around the top-scoring distributions, while a higher $\\tau$ leads to a smoother, more exploratory curriculum. This", + "bbox": [ + 169, + 819, + 823, + 911 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 3 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Automated Distribution-Level Curriculum Learning with UCB Sampling" + ], + "code_body": "Input: Dataset $\\mathcal{D} = \\{d_1,\\dots ,d_N\\}$ ; pre-trained model parameters $\\theta$ \nOutput: Post-trained model parameters $\\theta$ \n1: function DUMP(D, $\\theta$ \n2: Initialize distribution-level statistics \n3: for each $d_{j}\\in \\mathcal{D}$ do \n4: $A_{dj}^{w}\\gets []$ Sliding window for absolute advantages \n5: $n_{d_j}\\gets 0$ Total samples seen from $d_{j}$ \n6: $P(d_{j})\\leftarrow \\frac{1}{N}$ Equal initial weights \n7: for training step $t = 1,2,\\ldots ,T$ do \n8: Sample batch $\\mathcal{B}_t$ from $\\mathcal{D}$ according to $P(d_j)$ \n9: Compute advantages $\\hat{A} (o)$ for all $o\\in \\mathcal{B}_t$ via model rollout \n10: for each $d_{j}$ with samples in $\\mathcal{B}_t$ do \n11: $n_{d_j}\\gets n_{d_j} + |\\mathcal{B}_{t,d_j}|$ Update sample count; $\\mathcal{B}_{t,d_j}$ : subset of batch from $d_j$ \n12: $\\mathcal{A}_{dj}^{w}\\gets \\mathcal{A}_{dj}^{w}\\cup \\{\\left|\\hat{A} (o)\\right|\\mid x\\in \\mathcal{B}_{t,d_j},o\\sim \\pi_{\\theta}(\\cdot |x)\\}$ Append new advantages from $d_j$ \n13: $A_{dj}^{w}\\gets A_{dj}^{w}[-k:]$ k: Window Size; Keep last $k$ elements \n14: Compute UCB scores for each distribution \n15: $n_{\\mathrm{total}}\\gets \\sum_{d_j\\in \\mathcal{D}}n_{d_j}$ \n16: for each $d_{j}\\in \\mathcal{D}$ do \n17: $\\hat{L} (d_j)\\gets \\frac{1}{|\\mathcal{A}_{dj}^w|}\\sum_{a\\in \\mathcal{A}_{dj}^w}a$ Mean of absolute advantages \n18: UCB $(d_j)\\gets \\hat{L} (d_j) + \\sqrt{\\frac{2\\log(n_{\\mathrm{total}} + 1)}{n_{d_j} + 1}}$ Eq.3 \n19: Update sampling distribution \n20: $P(d_j)\\gets 
\\frac{\\exp(UCB(d_j) / \\tau)}{\\sum_{j = 1}^{N}\\exp(UCB(d_j) / \\tau)}\\quad \\forall d_j\\in \\mathcal{D}$ △: temperature \n21: Update $\\theta$ using $\\mathcal{B}_t$ with an RL algorithm (e.g., GRPO) \n22: return $\\theta$", + "bbox": [ + 173, + 107, + 825, + 520 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "bandit-based formulation provides a lightweight, adaptive, and reward-sensitive curriculum learning mechanism. It balances the need to focus on learnable distributions while avoiding premature neglect of underexplored ones. In the next section, we present the complete algorithmic implementation of DUMP, including its integration with rollout procedures and online statistics tracking.", + "bbox": [ + 169, + 547, + 823, + 604 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "3.3 Algorithm", + "text_level": 1, + "bbox": [ + 171, + 614, + 285, + 628 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The detailed curriculum learning procedure is illustrated in Algorithm 1. The algorithm takes as input a dataset $\\mathcal{D} = \\{d_1, \\ldots, d_N\\}$ composed of multiple distributions and returns the optimized model parameters $\\theta$ through a reinforcement learning loop. In lines 3-6, we initialize per-distribution statistics: each distribution $d_j \\in \\mathcal{D}$ is associated with an empty sliding window $\\mathcal{A}_{d_j}^w$ to store recent absolute advantages, a counter $n_{d_j}$ for tracking the number of samples drawn from $d_j$ , and an initial sampling probability $P(d_j) = \\frac{1}{N}$ indicating uniform sampling. At each training step $t$ (line 8), a batch $\\mathcal{B}_t$ is sampled according to the current distribution weights $P(d_j)$ . Advantages $\\hat{A}(o)$ are then computed via model rollouts for each sampled output $o \\in \\mathcal{B}_t$ (line 9). For every distribution $d_j$ that contributes samples in the current batch, we update its sample count $n_{d_j}$ (line 11), append the corresponding advantages to its sliding window $\\mathcal{A}_{d_j}^w$ (line 12), and truncate the window to retain only the most recent $k$ entries (300 by default) in line 13. This ensures that our estimate of per-distribution learnability remains up-to-date and robust to noise. In lines 15-18, we compute the Upper Confidence Bound (UCB) score $\\mathrm{UCB}(d_j)$ for each distribution. The score consists of two terms: the empirical mean absolute advantage $\\hat{L}(d_j)$ over the sliding window $\\mathcal{A}_{d_j}^w$ , and an exploration bonus inversely proportional to the square root of the number of samples $n_{d_j}$ . This balances prioritization of distributions that are either highly learnable or underexplored. In line 20, the sampling probabilities $P(d_j)$ are updated by applying a softmax over the UCB scores with a temperature parameter $\\tau$ (0.1 by default). Lower values of $\\tau$ result in sharper distributions that concentrate more heavily on top-ranked distributions, while higher $\\tau$ values induce a smoother, more exploratory curriculum. Finally, in line", + "bbox": [ + 169, + 633, + 826, + 912 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "21, the model parameters $\\theta$ are updated using the current batch $\\mathcal{B}_t$ with a reinforcement learning algorithm such as GRPO. 
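To summarize Algorithm 1 in code form, a stripped-down scheduler might look like the sketch below; class and method names are ours, and the interaction with the underlying GRPO trainer (e.g., VeRL) is omitted.

import math
from collections import deque
import numpy as np

class UCBDistributionScheduler:
    def __init__(self, dist_ids, window_size=300, tau=0.1):
        # Sliding window of recent |advantages| and total sample count per distribution.
        self.windows = {d: deque(maxlen=window_size) for d in dist_ids}
        self.counts = {d: 0 for d in dist_ids}
        self.tau = tau

    def update(self, dist_id, abs_advantages):
        # Called after each rollout step with the |A_hat| values of samples drawn from dist_id.
        self.windows[dist_id].extend(abs_advantages)
        self.counts[dist_id] += len(abs_advantages)

    def sampling_probs(self):
        # Eq. 3: empirical mean |advantage| plus an exploration bonus, then a softmax with temperature tau.
        n_total = sum(self.counts.values())
        scores = []
        for d, window in self.windows.items():
            mean_adv = float(np.mean(window)) if window else 0.0
            bonus = math.sqrt(2.0 * math.log(n_total + 1) / (self.counts[d] + 1))
            scores.append(mean_adv + bonus)
        scores = np.array(scores) / self.tau
        weights = np.exp(scores - scores.max())
        return dict(zip(self.windows, weights / weights.sum()))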
After $T$ steps, the algorithm returns the post-trained model $\\theta$ , which has been adaptively guided to learn from the most informative distributions.", + "bbox": [ + 169, + 90, + 823, + 133 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4 Experiments and Results", + "text_level": 1, + "bbox": [ + 171, + 147, + 415, + 165 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "In this section, we first introduce our experiments setup including used models datasets and more implementation details. We then demonstrate the results for the effectiveness of our method DUMP. More discussion about the comparison to static heuristic curriculum [11, 10] can be found in Appendix C.", + "bbox": [ + 169, + 171, + 826, + 228 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "4.1 Experiments Setup", + "text_level": 1, + "bbox": [ + 171, + 241, + 346, + 257 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "RL Algorithm and LLM Models. We use GRPO [3] as the underlying RL algorithm in our experiments, which is commonly used in capability-oriented LLM post-training [4]. We use Qwen2.5-7B-Instruct-1M [32] and Qwen2.5-3B-Instruct [32] in our experiments.", + "bbox": [ + 169, + 263, + 828, + 306 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Datasets and Settings. Multiple datasets are used in our experiments, including Knights and Knaves (K&K) puzzle dataset [33], RuleTaker [34], ProofWriter [35], AR-LSAT [36], LogiQA [37], LogicNLI [38], LongICLBench [39], GSM-8K [40], and AIME 1983-2024 [21]. In our experiments, we consider three different settings. The prompt template used in shown in Figure 3 in the Appendix.", + "bbox": [ + 169, + 311, + 826, + 368 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Setting 1: Post-training on $K \\& K$ puzzles with varying character numbers. The Knights and Knaves (K&K) dataset [33] contains procedurally generated logic puzzles where each character is either a knight (always truthful) or a knave (always lying), and the goal is to infer each character's identity. The dataset supports fine-grained difficulty control by adjusting the number of characters. We generate puzzles with 3 to 14 characters, treating each character count as a separate distribution—yielding 12 distinct distributions. Each distribution includes 900 training and 100 test samples. We post-train Qwen2.5-7B-Instruct-1M on the combined dataset across all distributions.", + "bbox": [ + 169, + 375, + 826, + 470 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Setting 2: Post-training on diverse logic reasoning distributions. We perform post-training using a mixture of logic reasoning datasets, including RuleTaker [34], ProofWriter [35], AR-LSAT [36], LogiQA [37], LogicNLI [38], LongICLBench Geomotion [39], and Knights and Knaves (K&K) [33]. For RuleTaker, ProofWriter, and K&K, we further partition the data distributions by complexity levels: RuleTaker by 2, 3, and 5 required reasoning steps; ProofWriter by 3, 4, and 5 required reasoning steps; and K&K by the number of characters (3-7). In total, we construct 15 logic distributions, each containing 400 training samples. We use Qwen2.5-7B-Instruct-1M for this setting.", + "bbox": [ + 169, + 477, + 826, + 575 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Setting 3: Post-training on diverse math reasoning distributions. We also explore post-training on diverse math data. 
For AIME, we split the data into four distributions based on competition years—1983–1993, 1994–2004, 2005–2015, and 2016–2024—since problem styles evolve significantly over time. We also include GSM-8K as a complementary math dataset. This results in five math distributions in total, with 7473 (GSM-8K), 124, 194, 283, and 238 training samples, respectively. We use Qwen2.5-3B-Instruct for this setting.", + "bbox": [ + 169, + 580, + 826, + 664 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Reward Implementation. We adopt the rule-based reward mechanism Shao et al. [3] to provide stable and hack-resistant training signals during RL-based post-training and follow the detailed reward implementation in Logic-RL [11]. Specifically, each model response is expected to follow a structured format with the reasoning process enclosed in tags and the final answer enclosed in tags. The reward system consists of two components:", + "bbox": [ + 169, + 670, + 826, + 741 + ], + "page_idx": 5 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Format Reward. A binary reward based on whether the output strictly adheres to the expected format. If the model includes exactly one well-formed and one section in the correct order, it receives a reward of +1; otherwise, it receives a penalty of -1.", + "- Answer Reward. We evaluate the correctness of the final answer. If the predicted identities fully match the ground truth, the model receives a reward of $+2$ ; if the answer is incorrect, -1.5; and if the answer is missing or unparsable, -2." + ], + "bbox": [ + 215, + 753, + 826, + 843 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Other Implementation Details. All experiments are conducted on servers equipped with 8 Nvidia A100 GPUs. Our method is implemented with VeRL [41] LLM Reinforcement Learning framework. We use GRPO [3] as the training algorithm and follow standard practice for actor rollout and optimization. The actor learning rate is set to $1e - 6$ , training batch size is set to 128, and the PPO", + "bbox": [ + 169, + 854, + 826, + 912 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/e6f0c0bb94a114b13ae70ae1245e9712158f506828d63f66fd321b5acaa082cd.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Data Distributionwithout DUMPwith DUMP
RuleTaker 2 Steps0.790.79
RuleTaker 3 Steps0.761.02
RuleTaker 5 Steps0.560.98
ProofWriter 3 Steps1.181.09
ProofWriter 4 Steps0.971.09
ProofWriter 5 Steps1.241.05
AR-LSAT-0.70-0.52
LogiQA1.941.70
LogicNLI-0.29-0.23
LongICLBench Geomotion0.540.25
K & K 3 Characters2.002.00
K & K 4 Characters1.541.76
K & K 5 Characters1.531.84
K & K 6 Characters0.831.42
K & K 7 Characters0.561.02
Average0.901.17
", + "bbox": [ + 202, + 88, + 485, + 282 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/aa3785a5149d03ddc5b6795803c83eba1155b634ceaac84b5d8b464165a3bb38.jpg", + "table_caption": [ + "Table 1: Test Answer Reward (see Section 4.1) on diverse logic reasoning distributions (Setting 2). The model used here is Qwen2.5-7B-Instruct-1M." + ], + "table_footnote": [], + "table_body": "
Data Distributionwithout DUMPwith DUMP
GSM-8K1.501.47
AIME 1983-1993-0.76-0.39
AIME 1994-2004-1.50-1.02
AIME 2005-2015-0.94-0.94
AIME 2016-2024-1.27-1.27
Average-0.59-0.43
", + "bbox": [ + 553, + 138, + 794, + 232 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 2: Test Answer Reward (see Section 4.1) on diverse math reasoning distributions (Setting 3). The model used here is Qwen2.5-3B-Instruct.", + "bbox": [ + 522, + 239, + 823, + 295 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "mini-batch size is 32. KL divergence regularization is applied to encourage alignment with the reference policy, with a KL loss coefficient of 0.001. Each rollout batch contains 16 responses. If not specified, we allow for a maximum response length of 20480 and 4096 tokens during training for Qwen2.5-7B-Instruct-1M and Qwen2.5-3B-Instruct, respectively. The window size $k$ and the temperature $\\tau$ in our curriculum learning framework is set to 300 and 0.1, respectively.", + "bbox": [ + 169, + 339, + 823, + 409 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.2 Effectiveness of DUMP", + "text_level": 1, + "bbox": [ + 171, + 419, + 377, + 431 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Setting 1: Post-training on the combination of K&K puzzle datasets with different number of characters. To evaluate the effectiveness of DUMP in improving post-training efficiency and performance, we compare it against a uniform distribution sampling baseline across 12 distinct data distributions in the K&K puzzle dataset. Each distribution corresponds to a fixed number of characters in the puzzle, ranging from 3 to 14. Figure 1 plots the test answer reward over training steps for each distribution, with and without DUMP. Across all distributions, DUMP consistently outperforms the baseline, achieving faster convergence and higher test performance. The gains are particularly notable in mid- to high-difficulty distributions (e.g., 6 to 12 characters), where uniform sampling tends to struggle due to data underutilization. For example, in the 9-character distribution (Figure 1g), the model trained with DUMP achieves a reward of over 0.5, whereas the baseline remains below 0.0. These results validate the core intuition of DUMP: dynamically adjusting the sampling focus toward high-learnability distributions accelerates policy improvement while avoiding wasted effort on over-saturated or low-signal data. Notably, the improvement is achieved without any curriculum heuristics or manual data ordering—only by observing advantage signals and adapting online.", + "bbox": [ + 169, + 438, + 826, + 633 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Setting 2: Post-training on diverse logic reasoning distributions. We apply DUMP to 15 logic reasoning distributions including subsets of RuleTaker, ProofWriter, and K&K (with varying difficulty levels), as well as datasets such as AR-LSAT, LogiQA, LogicNLI, and LongICLBench. As shown in Table 1, DUMP improves the average test answer reward from 0.90 to 1.17. Notable improvements are observed on complex tasks such as AR-LSAT, where the reward increases from -0.70 to -0.52, and K&K 7 Characters, from 0.56 to 1.02. These results demonstrate that DUMP adaptively prioritizes undertrained but learnable distributions, leading to more efficient capability gains.", + "bbox": [ + 169, + 638, + 823, + 736 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Setting 3: Post-training on diverse math data distributions. We further evaluate DUMP on GSM-8K and different subsets of AIME grouped by competition years. 
As shown in Table 2, DUMP raises the average test answer reward from -0.59 to -0.43, with the most significant gain on AIME 1994-2004, where performance improves from -1.50 to -1.02. These results highlight DUMP's robustness under distribution shifts and data imbalance.", + "bbox": [ + 169, + 741, + 823, + 810 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "4.3 Ablation Study on the Sampling Strategy", + "text_level": 1, + "bbox": [ + 171, + 821, + 498, + 837 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "In this section, we ablate the sampling strategy used in DUMP's UCB-based scheduler. As described in Algorithm 1, our method applies soft sampling controlled by a temperature parameter. The greedy variant (temperature $= 0$ ) always selects the distribution with the highest UCB score, while our default uses a small temperature (0.1) to enable probabilistic sampling. We conduct experiments under Setting 1, with a maximum training response length of 10240 tokens. After 100 training steps, the", + "bbox": [ + 169, + 840, + 823, + 912 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/2d84ad10a799a03d621f293c95c9751efb7f544fd948539b15f56498132f463c.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 89, + 385, + 196 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/9e9ef5825d12f23ef4294c784c61776bb5627c910310930c8ca5388763eedfff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 392, + 89, + 602, + 196 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b42d5b9d03640e1b2d58067da1a5d77bcd6058d10d8acd393caffffa24d0c095.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 90, + 820, + 196 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/b441adacc37480df5405e8d06df557db3f332d10da975d85a6a2fec50e4bcdca.jpg", + "image_caption": [ + "(a) 3 Characters" + ], + "image_footnote": [], + "bbox": [ + 174, + 220, + 385, + 330 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/dd3c71bd214119d0d2c609783ec05f1acbc266fc90b30589cda8415312677dec.jpg", + "image_caption": [ + "(b) 4 Characters" + ], + "image_footnote": [], + "bbox": [ + 392, + 220, + 602, + 329 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/90aa2351a348cc35cdc45b61d56ca9e75c94d18c9af73121a1b2516738e49c84.jpg", + "image_caption": [ + "(c) 5 Characters" + ], + "image_footnote": [], + "bbox": [ + 609, + 220, + 820, + 329 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/7dba6855cb7a7d7cb4cefc1e6b8ce55ec45b710f83cbbc554a09682679d2d825.jpg", + "image_caption": [ + "(d) 6 Characters" + ], + "image_footnote": [], + "bbox": [ + 174, + 353, + 385, + 460 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/e4805948e0cca244ad9a13762138150a45a99b18afcb9bb2522dc2b2b6595d89.jpg", + "image_caption": [ + "(e) 7 Characters" + ], + "image_footnote": [], + "bbox": [ + 392, + 353, + 602, + 460 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/93f81e3b516330cf41e06a4aff20afde3f874be24fd483e9ec177162a0b412f2.jpg", + "image_caption": [ + "(f) 8 Characters" + ], + "image_footnote": [], + "bbox": [ + 609, + 353, + 820, + 460 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c347b6d78546ea72564dfbc09bebe1fe6fe727ec0282cdd102864b3145b966a8.jpg", + "image_caption": [ + "(g) 9 
Characters", + "(j) 12 Characters", + "Figure 1: Effectiveness of DUMP on the K&K puzzle dataset mixed with 12 distributions defined by the number of characters in each puzzle (Setting 1). DUMP consistently achieves higher answer reward on test dataset compared to baseline. The model used here is Qwen2.5-7B-Instruct-1M." + ], + "image_footnote": [], + "bbox": [ + 174, + 484, + 385, + 592 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/c9c826488d46bd743a05c0379ad5bd3d7978b627c30c7548629b86defd59f338.jpg", + "image_caption": [ + "(h) 10 Characters", + "(k) 13 Characters" + ], + "image_footnote": [], + "bbox": [ + 392, + 484, + 602, + 592 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/065491244f9bdf1954390d6968e94ab48062efc337aab6989e5479b161bec53f.jpg", + "image_caption": [ + "(i) 11 Characters", + "(1) 14 Characters" + ], + "image_footnote": [], + "bbox": [ + 609, + 484, + 820, + 592 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "greedy strategy significantly underperforms due to its lack of exploration—it tends to lock onto a single distribution early and fails to adapt. For instance, on the 13- and 14-character K&K tasks, the greedy variant achieves test answer rewards of $-0.91$ and $-1.38$ , while soft sampling reaches $-0.66$ and $-1.16$ , respectively. These results highlight the importance of maintaining exploration via a non-zero temperature to prevent the scheduler from collapsing onto suboptimal distributions.", + "bbox": [ + 169, + 676, + 823, + 746 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "4.4 Analyzing the Automated Curriculum by DUMP", + "text_level": 1, + "bbox": [ + 169, + 755, + 555, + 770 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "To understand how DUMP dynamically allocates training effort across data distributions, we analyze the sampling patterns induced by its UCB-based curriculum mechanism. Figure 2 shows the cumulative number of samples drawn from each distribution (3 to 14 characters) over the course of training on K&K puzzles with varying character numbers (Setting 1). We observe a clear curriculum-like progression: distributions corresponding to simpler puzzles (e.g., 3-5 characters) are heavily sampled in the early stages of training, while more complex distributions (e.g., 10-14 characters) are gradually introduced and increasingly prioritized as training progresses. This pattern aligns with the model's evolving capacity—early training favors distributions with high initial advantage magnitudes, and as the model saturates on those, DUMP shifts focus to underexplored but learnable distributions. 
Importantly, this adaptive sampling behavior emerges automatically from empirical advantage signals", + "bbox": [ + 169, + 772, + 826, + 912 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/6cc6f36cb334ce3d8187d78cccfbbefe6196ca96877d726115347dfe7c271556.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 176, + 89, + 385, + 196 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/ed5ee9bc8d30f66370b8860be771fe2239d69990f22f96124e7b422658a99402.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 392, + 89, + 602, + 196 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/1093ff72a142fb03fb9d135eb344f89dcf33d2036e5144a4d0fbeb8fa8dcf81e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 609, + 90, + 820, + 196 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/3e862a18440592fab0f8c83e8df6b59f79b756e68b5338018918ce42306eedfc.jpg", + "image_caption": [ + "(a) 3 Characters" + ], + "image_footnote": [], + "bbox": [ + 174, + 220, + 385, + 330 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/bd7a606c29b4217534d1180cc89eb34e1ab773d0c9d135da70642a335ccb9d53.jpg", + "image_caption": [ + "(b) 4 Characters" + ], + "image_footnote": [], + "bbox": [ + 392, + 220, + 602, + 329 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/e28d82a879c78675b5c68c359b5de85522359e36fb32c3bf92a91e0ba286337c.jpg", + "image_caption": [ + "(c) 5 Characters" + ], + "image_footnote": [], + "bbox": [ + 609, + 220, + 820, + 329 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/dbd799c502135303951605d7513f02860067f2b1e487881b6cb98b637df36867.jpg", + "image_caption": [ + "(d) 6 Characters" + ], + "image_footnote": [], + "bbox": [ + 174, + 352, + 385, + 460 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/8e0df7b6628cce08d19fc414d2c8e28e73e39228bfd705f4e6fc8edb1552b07e.jpg", + "image_caption": [ + "(e) 7 Characters" + ], + "image_footnote": [], + "bbox": [ + 392, + 353, + 602, + 460 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/2aeecd41df07eb9cbc6a903e8cdfeb111270dc26d63b455ee2507cce81943392.jpg", + "image_caption": [ + "(f) 8 Characters" + ], + "image_footnote": [], + "bbox": [ + 609, + 353, + 820, + 460 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/33dda47032ad4c964efebd5b8681521466c3fa0dc248e1d8fc4faf3f6cfeb8ab.jpg", + "image_caption": [ + "(g) 9 Characters", + "(j) 12 Characters", + "Figure 2: Curriculum (sample counts) induced by DUMP across 12 K&K puzzle distributions with increasing difficulty defined by the number of characters in each puzzle (Setting 1). Simpler distributions are automatically prioritized in early training, while more complex ones are progressively emphasized—both in an entirely automated manner—demonstrating automated distribution scheduling." 
+ ], + "image_footnote": [], + "bbox": [ + 174, + 483, + 385, + 592 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/f410bc2ba3a4e44d2bc0ad93a48a6847bf24a13c070787cb53ce0a6504aa2275.jpg", + "image_caption": [ + "(h) 10 Characters", + "(k) 13 Characters" + ], + "image_footnote": [], + "bbox": [ + 392, + 483, + 602, + 592 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/767d20fa7a31ff3d7e8d2f7693228a3d1141ed28620a51d6d9c94e599e75bd3c.jpg", + "image_caption": [ + "(i) 11 Characters", + "(1) 14 Characters" + ], + "image_footnote": [], + "bbox": [ + 609, + 483, + 820, + 592 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "without requiring manual specification of curriculum order. These results highlight DUMP's ability to construct an implicit, data-driven curriculum that mirrors traditional easy-to-hard strategies, while remaining responsive to online training dynamics.", + "bbox": [ + 169, + 715, + 823, + 758 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "5 Conclusion", + "text_level": 1, + "bbox": [ + 171, + 782, + 302, + 797 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "In this work, we introduce a distribution-level curriculum learning framework for RL-based posttraining of large language models. DUMP leverages the expected absolute advantage as a learnability signal to adaptively allocate training focus across heterogeneous distributions. By formalizing scheduling as a multi-armed bandit and adopting a UCB-based sampling strategy, DUMP balances exploitation and exploration in a principled way. Experiments demonstrate that DUMP consistently improves convergence and final performance over baselines. These results highlight the value of distribution-aware curriculum learning in LLM RL post-training.", + "bbox": [ + 169, + 814, + 826, + 912 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 173, + 89, + 269, + 104 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022.", + "[2] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023.", + "[3] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024.", + "[4] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025.", + "[5] Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. The flan collection: Designing data and methods for effective instruction tuning. In International Conference on Machine Learning, pages 22631-22648. 
PMLR, 2023.", + "[6] Harrison Lee, Samrat Phatale, Hassan Mansoor, Thomas Mesnard, Johan Ferret, Kellie Lu, Colton Bishop, Ethan Hall, Victor Carbune, Abhinav Rastogi, et al. Rlaif: Scaling reinforcement learning from human feedback with ai feedback. arXiv e-prints, pages arXiv-2309, 2023.", + "[7] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, et al. T\\''ulu 3: Pushing frontiers in open language model post-training. arXiv preprint arXiv:2411.15124, 2024.", + "[8] ByteDance Seed. Seed-thinking-v1.5: Advancing superb reasoning models with reinforcement learning. Technical report, ByteDance, 2025. URL https://github.com/ByteDance-Seed/Seed-Thinking-v1.5.", + "[9] Pulkit Pattnaik, Rishabh Maheshwary, Kelechi Ogueji, Vikas Yadav, and Sathwik Tejaswi Madhusudhan. Enhancing alignment using curriculum learning & ranked preferences. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 12891-12907, 2024.", + "[10] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025.", + "[11] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, 2025.", + "[12] Paul F Christiano, Jan Leike, Tom Brown, Miljan Martic, Shane Legg, and Dario Amodei. Deep reinforcement learning from human preferences. Advances in neural information processing systems, 30, 2017.", + "[13] Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593, 2019.", + "[14] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022." + ], + "bbox": [ + 173, + 113, + 825, + 910 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[15] Amelia Glaese, Nat McAleese, Maja Trebacz, John Aslanides, Vlad Firoiu, Timo Ewalds, Maribeth Rauh, Laura Weidinger, Martin Chadwick, Phoebe Thacker, et al. Improving alignment of dialogue agents via targeted human judgements. arXiv preprint arXiv:2209.14375, 2022.", + "[16] OpenAI. Learning to reason with llms. Technical report, OpenAI, 2024. URL https://openai.com/index/learning-to-reason-with-llms/.", + "[17] OpenAI. Openai o3-mini. Technical report, OpenAI, 2025. URL https://openai.com/index/openai-o3-mini/.", + "[18] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaiev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025.", + "[19] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. 
arXiv preprint arXiv:2410.21276, 2024.", + "[20] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024.", + "[21] Aime_1983_2024 (revision 6283828), 2025. URL https://huggingface.co/datasets/di-zhang-fdu/AIME_1983_2024.", + "[22] Mikhail Mirzayanov. Codeforces. https://codeforces.com/. Accessed: 2025-04-13.", + "[23] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017.", + "[24] Yoshua Bengio, Jérôme Louradour, Ronan Collobert, and Jason Weston. Curriculum learning. In Proceedings of the 26th annual international conference on machine learning, pages 41-48, 2009.", + "[25] Alex Graves, Marc G Bellemare, Jacob Menick, Remi Munos, and Koray Kavukcuoglu. Automated curriculum learning for neural networks. In international conference on machine learning, pages 1311-1320. Pmlr, 2017.", + "[26] Niels Justesen, Ruben Rodriguez Torrado, Philip Bontrager, Ahmed Khalifa, Julian Togelius, and Sebastian Risi. Illuminating generalization in deep reinforcement learning through procedural level generation. arXiv preprint arXiv:1806.10729, 2018.", + "[27] Rui Wang, Joel Lehman, Jeff Clune, and Kenneth O Stanley. Paired open-ended trailblazer (poet): Endlessly generating increasingly complex and diverse learning environments and their solutions. arXiv preprint arXiv:1901.01753, 2019.", + "[28] Richard Li, Allan Jabri, Trevor Darrell, and Pulkit Agrawal. Towards practical multi-object manipulation using relational reinforcement learning. In 2020 IEEE international conference on robotics and automation (icra), pages 4051-4058. IEEE, 2020.", + "[29] Tambet Matiisen, Avital Oliver, Taco Cohen, and John Schulman. Teacher-student curriculum learning. IEEE transactions on neural networks and learning systems, 31(9):3732-3740, 2019.", + "[30] Rémy Portelas, Cédric Colas, Katja Hofmann, and Pierre-Yves Oudeyer. Teacher algorithms for curriculum learning of deep rl in continuously parameterized environments. In Conference on Robot Learning, pages 835-853. PMLR, 2020.", + "[31] Peter Auer, Nicolo Cesa-Bianchi, and Paul Fischer. Finite-time analysis of the multiarmed bandit problem. Machine learning, 47:235-256, 2002.", + "[32] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + ], + "bbox": [ + 171, + 90, + 826, + 910 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 10 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[33] Chulin Xie, Yangsibo Huang, Chiyuan Zhang, Da Yu, Xinyun Chen, Bill Yuchen Lin, Bo Li, Badih Ghazi, and Ravi Kumar. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024.", + "[34] Peter Clark, Oyvind Tafjord, and Kyle Richardson. Transformers as soft reasoners over language. arXiv preprint arXiv:2002.05867, 2020.", + "[35] Oyvind Tafjord, Bhavana Dalvi Mishra, and Peter Clark. Proofwriter: Generating implications, proofs, and abductive statements over natural language. arXiv preprint arXiv:2012.13048, 2020.", + "[36] Wanjun Zhong, Siyuan Wang, Duyu Tang, Zenan Xu, Daya Guo, Jiahai Wang, Jian Yin, Ming Zhou, and Nan Duan. 
Ar-lsat: Investigating analytical reasoning of text. arXiv preprint arXiv:2104.06598, 2021.", + "[37] Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. Logiqa: A challenge dataset for machine reading comprehension with logical reasoning. arXiv preprint arXiv:2007.08124, 2020.", + "[38] Jidong Tian, Yitian Li, Wenqing Chen, Liqiang Xiao, Hao He, and Yaohui Jin. Diagnosing the first-order logical reasoning ability through logicli. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3738-3747, 2021.", + "[39] Tianle Li, Ge Zhang, Quy Duc Do, Xiang Yue, and Wenhu Chen. Long-context llms struggle with long in-context learning. arXiv preprint arXiv:2404.02060, 2024.", + "[40] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021.", + "[41] Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024." + ], + "bbox": [ + 173, + 90, + 826, + 517 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A Proof for Theorem 3.1", + "text_level": 1, + "bbox": [ + 171, + 89, + 395, + 104 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Theorem A.1 (Expected Advantage Magnitude Reflects Learnability). Given a policy $\\pi_{\\theta}$ and a data distribution $d$ , the expected absolute advantage $\\mathbb{E}_{x\\sim d}\\left[\\mathbb{E}_{o_i\\sim \\pi_\\theta (\\cdot |x)}\\left[|\\hat{A}_i|\\right]\\right]$ serves as a proxy for how much that distribution $d$ can help the model improve, where the distribution $d$ consisting of prompts $x\\sim d$ , each prompt has a group of sampled outputs $\\{o_1,\\ldots ,o_n\\}$ , and $\\hat{A}_i$ denotes the advantage of output $o_i$ .", + "bbox": [ + 169, + 119, + 826, + 202 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Proof. Let $\\pi_{\\theta}$ be the current model policy. Consider a data distribution $d$ , where $x \\sim d$ are prompts and $\\{o_1, \\ldots, o_n\\} \\sim \\pi_{\\theta}(\\cdot | x)$ are sampled outputs. For each output $o_i$ , the advantage is estimated as", + "bbox": [ + 169, + 215, + 823, + 244 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\hat {A} _ {i} = r _ {i} - b (x),\n$$\n", + "text_format": "latex", + "bbox": [ + 442, + 250, + 553, + 267 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "where $r_i$ is the reward assigned to $o_i$ and $b(x)$ is a baseline (e.g., the mean reward over the group). The policy gradient under common policy-gradient methods (e.g., PPO or GRPO) can be written as:", + "bbox": [ + 169, + 272, + 826, + 301 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\nabla_ {\\theta} \\mathcal {J} (\\theta) = \\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o _ {i} \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ \\hat {A} _ {i} \\cdot \\nabla_ {\\theta} \\log \\pi_ {\\theta} (o _ {i} \\mid x) \\right] \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 307, + 306, + 684, + 333 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Now consider the magnitude of the gradient vector. 
The strength of the training signal from $d$ depends on the expected norm of the gradient, which is bounded below by:", + "bbox": [ + 169, + 343, + 823, + 372 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\left\\| \\nabla_ {\\theta} \\mathcal {J} (\\theta) \\right\\| \\gtrsim \\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o _ {i} \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ | \\hat {A} _ {i} | \\cdot \\| \\nabla_ {\\theta} \\log \\pi_ {\\theta} (o _ {i} \\mid x) \\| \\right] \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 287, + 377, + 705, + 405 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Assuming that $\\| \\nabla_{\\theta}\\log \\pi_{\\theta}(o_i\\mid x)\\|$ is bounded and varies slowly across $d$ , the dominant term affecting the gradient norm is:", + "bbox": [ + 169, + 414, + 823, + 443 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o _ {i} \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ | \\hat {A} _ {i} | \\right] \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 403, + 441, + 589, + 468 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Thus, the expected absolute advantage serves as a proxy for the learning signal magnitude contributed by distribution $d$ . The expected absolute advantage reflects how much training on distribution $d$ can improve the model parameters, making it a suitable signal for curriculum scheduling.", + "bbox": [ + 169, + 476, + 823, + 518 + ], + "page_idx": 12 + }, + { + "type": "image", + "img_path": "images/ed7c1f9d1fcdd8ad872637f61821fac0d86b9eb068d8a228df80f01f4ae8e37b.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 807, + 523, + 823, + 536 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "B Theoretical Justification for UCB-Based Distribution Scheduling", + "text_level": 1, + "bbox": [ + 169, + 556, + 748, + 575 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We provide a theoretical justification for using Upper Confidence Bound (UCB) as a strategy for scheduling training over data distributions in RL-based post-training. Our objective is to maximize the cumulative learnability gain over $T$ training steps, defined as:", + "bbox": [ + 169, + 588, + 823, + 631 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\max _ {\\{d _ {t} \\} _ {t = 1} ^ {T}} \\sum_ {t = 1} ^ {T} L (d _ {t}), \\quad \\text {w h e r e} \\quad L (d) = \\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ | \\hat {A} (o) | \\right] \\right].\n$$\n", + "text_format": "latex", + "bbox": [ + 277, + 636, + 715, + 676 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "This setting can be viewed as a stochastic multi-armed bandit (MAB) problem, where each data distribution $d_{j} \\in \\mathcal{D}$ corresponds to an arm with unknown reward $L(d_{j})$ , interpreted as the expected absolute advantage from training on samples from $d_{j}$ . 
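To make this bandit view concrete, here is a small, self-contained simulation (illustrative only, not taken from the paper; the distribution names and reward values are invented) in which classic UCB1 is run over three synthetic arms whose noisy rewards stand in for batch-mean absolute advantages. After a short exploration phase, pulls concentrate on the arm with the highest expected reward, which is the behavior the regret analysis below formalizes.

```python
import math
import random

# Toy stand-ins for L(d): hypothetical expected absolute advantages of three
# synthetic distributions, bounded in [0, 1] (all numbers are made up).
true_L = {"easy": 0.2, "medium": 0.8, "hard": 0.4}

counts = {d: 0 for d in true_L}   # n_j: number of times each arm was pulled
means = {d: 0.0 for d in true_L}  # empirical mean reward for each arm

def ucb1(d, t):
    # Classic UCB1 score: empirical mean plus an exploration bonus that
    # shrinks as the arm is pulled more often.
    if counts[d] == 0:
        return float("inf")  # make sure every arm is tried at least once
    return means[d] + math.sqrt(2.0 * math.log(t) / counts[d])

random.seed(0)
for t in range(1, 501):
    arm = max(true_L, key=lambda d: ucb1(d, t))
    # Noisy, clipped observation standing in for a batch-mean |advantage|.
    reward = min(1.0, max(0.0, random.gauss(true_L[arm], 0.1)))
    counts[arm] += 1
    means[arm] += (reward - means[arm]) / counts[arm]

print(counts)  # most pulls concentrate on "medium", the most learnable arm
```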
At each training step $t$ , the learner selects a distribution $d_{t}$ and obtains an empirical reward $\\hat{L}(d_{t})$ by averaging the absolute advantages observed in the batch.", + "bbox": [ + 169, + 689, + 823, + 760 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We define the regret as the gap between the cumulative learnability gain of the best fixed distribution $d^{*} = \\arg \\max_{d}L(d)$ and that of the learner's actual selections:", + "bbox": [ + 169, + 768, + 823, + 796 + ], + "page_idx": 12 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R e g r e t} (T) = \\sum_ {t = 1} ^ {T} L \\left(d ^ {*}\\right) - \\sum_ {t = 1} ^ {T} L \\left(d _ {t}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 372, + 801, + 622, + 844 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "To analyze this regret, we make the following assumptions:", + "bbox": [ + 169, + 854, + 563, + 871 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "1. For each distribution $d_{j}$ , the per-output absolute advantages $|\\hat{A}(o)|$ , where $o \\sim \\pi_{\\theta}(\\cdot|x)$ , are i.i.d. and bounded in $[0, C]$ for some constant $C > 0$ .", + "bbox": [ + 210, + 882, + 823, + 912 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "2. The true expected advantage $L(d_{j})$ remains approximately stationary over a local training window, enabling meaningful online adaptation.", + "bbox": [ + 207, + 90, + 823, + 119 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Note: In practice, we can clip or normalize $|\\hat{A}(o)|$ to satisfy the boundedness condition. The introduction of the constant $C$ only scales the regret by a constant factor and does not affect the asymptotic rate of convergence.", + "bbox": [ + 169, + 133, + 823, + 176 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Under these assumptions, the following regret bound holds:", + "bbox": [ + 171, + 181, + 565, + 196 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Theorem B.1. Let $\\mathcal{D} = \\{d_1, \\ldots, d_N\\}$ be a set of data distributions with fixed expected rewards $L(d_j) \\in [0, C]$ . Then, applying the UCB1 algorithm to the empirical reward observations yields the regret bound:", + "bbox": [ + 169, + 199, + 823, + 242 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\nR e g r e t (T) \\leq O \\left(C \\cdot \\sum_ {j: \\Delta_ {j} > 0} \\frac {\\log T}{\\Delta_ {j}}\\right), \\quad w h e r e \\quad \\Delta_ {j} = L \\left(d ^ {*}\\right) - L \\left(d _ {j}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 259, + 250, + 736, + 297 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Proof. This result is a direct application of the classical UCB1 regret bound [31], extended to the case where reward values lie in $[0, C]$ . 
Let $d^{*} = \\arg \\max_{d} L(d)$ be the optimal distribution, and let $\\Delta_{j} = L(d^{*}) - L(d_{j})$ denote the suboptimality gap for each arm $d_{j}$ .", + "bbox": [ + 169, + 311, + 823, + 354 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "At each time step $t$ , UCB1 selects the distribution $d_{j}$ with the highest upper confidence bound:", + "bbox": [ + 169, + 359, + 792, + 375 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\mathbf {U C B} (d _ {j}) = \\hat {L} (d _ {j}) + \\sqrt {\\frac {2 C ^ {2} \\log t}{n _ {j}}},\n$$\n", + "text_format": "latex", + "bbox": [ + 380, + 382, + 616, + 422 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where $n_j$ is the number of times distribution $d_j$ has been sampled so far, and $\\hat{L}(d_j)$ is the empirical mean of observed rewards (mean absolute advantages).", + "bbox": [ + 169, + 431, + 823, + 460 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Under the assumptions that rewards are i.i.d. and bounded in $[0, C]$ , the Hoeffding inequality guarantees that with high probability the empirical mean concentrates around the true mean $L(d_{j})$ , and the UCB selection mechanism will only pick suboptimal arms a logarithmic number of times. Based on UCB1 regret bound [31], The cumulative regret is therefore bounded by:", + "bbox": [ + 169, + 465, + 826, + 523 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R e g r e t} (T) \\leq \\sum_ {j: \\Delta_ {j} > 0} \\left(\\frac {8 C ^ {2} \\log T}{\\Delta_ {j}} + O (\\Delta_ {j})\\right),\n$$\n", + "text_format": "latex", + "bbox": [ + 341, + 529, + 653, + 569 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "which simplifies to the stated asymptotic bound:", + "bbox": [ + 169, + 575, + 491, + 590 + ], + "page_idx": 13 + }, + { + "type": "equation", + "text": "\n$$\n\\operatorname {R e g r e t} (T) = O \\left(C \\cdot \\sum_ {j: \\Delta_ {j} > 0} \\frac {\\log T}{\\Delta_ {j}}\\right).\n$$\n", + "text_format": "latex", + "bbox": [ + 369, + 597, + 625, + 646 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "This result shows that our distribution-level scheduling strategy, when driven by UCB over empirical advantage rewards, is provably efficient. It dynamically concentrates training on distributions with high estimated learnability while ensuring sufficient exploration, with regret that scales logarithmically in $T$ and linearly in $1 / \\Delta_j$ .", + "bbox": [ + 169, + 680, + 823, + 738 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C Comparison to Heuristic Curriculum", + "text_level": 1, + "bbox": [ + 169, + 756, + 522, + 773 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Heuristic curricula, which manually specify a fixed training schedule over data distributions—e.g., training on Distribution A for N steps before switching to Distribution B—have been explored in prior work [11, 10], particularly in environments where task difficulty or domain progression is well understood. However, such approaches have several limitations that make them less suitable for our setting. First, effective heuristic scheduling requires strong prior knowledge about the relative difficulty and learnability of each distribution. In our setting, which involves diverse domains such as logic reasoning, mathematics, and programming, such prior knowledge is often unavailable or misleading. 
For example, a distribution may appear \"easier\" but provide low learning signal, or seem \"harder\" but actually yield high gradient utility. This makes it extremely difficult to construct", + "bbox": [ + 169, + 787, + 826, + 912 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 490, + 935, + 508, + 946 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Example of Prompt", + "text_level": 1, + "bbox": [ + 192, + 95, + 305, + 107 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "You are a helpful assistant. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within and tags, respectively, i.e., reasoning process here answer here . Now the user asks you to solve a reasoning problem. After thinking, when you finally reach a conclusion, clearly state the identity of each character within tags. [Problem]", + "bbox": [ + 189, + 111, + 818, + 170 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Figure 3: Example of prompt used.", + "bbox": [ + 380, + 181, + 614, + 196 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "robust, generalizable heuristics across tasks. Second, heuristic curricula are static and cannot adapt to the evolving needs of the model during training. In contrast, DUMP dynamically adjusts sampling priorities based on actual model performance—measured via policy advantages—allowing it to focus on the most beneficial distributions at each stage of learning. Finally, the lack of standardized or widely accepted heuristic curricula for our task suite makes it hard to conduct fair and meaningful comparisons. Instead, we benchmark DUMP against uniform sampling and adaptive baselines, which are more reflective of current best practices in large-scale post-training pipelines.", + "bbox": [ + 169, + 223, + 823, + 321 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "D Limitations", + "text_level": 1, + "bbox": [ + 171, + 340, + 307, + 356 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "First, while the core idea of distribution-level curriculum learning is broadly applicable, we evaluate DUMP only in the context of large language models (LLMs) and do not extend the experiments to multimodal large language models (MLLMs) due to computational constraints. Second, our experiments are limited to 7B-scale models. 
Scaling our method to larger models remains an important direction for future work.", + "bbox": [ + 169, + 371, + 823, + 441 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 490, + 935, + 506, + 946 + ], + "page_idx": 14 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_model.json b/data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_model.json new file mode 100644 index 0000000000000000000000000000000000000000..c1b1a1cf9e0d11a393a4ce15b06345bd62d712f1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_model.json @@ -0,0 +1,2496 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.279, + 0.058, + 0.718 + ], + "angle": 270, + "content": "arXiv:2504.09710v3 [cs.LG] 11 Oct 2025" + }, + { + "type": "title", + "bbox": [ + 0.189, + 0.123, + 0.812, + 0.175 + ], + "angle": 0, + "content": "DUMP: Automated Distribution-Level Curriculum Learning for RL-based LLM Post-training" + }, + { + "type": "text", + "bbox": [ + 0.193, + 0.225, + 0.312, + 0.242 + ], + "angle": 0, + "content": "Zhenting Wang" + }, + { + "type": "text", + "bbox": [ + 0.339, + 0.226, + 0.438, + 0.242 + ], + "angle": 0, + "content": "Guofeng Cui" + }, + { + "type": "text", + "bbox": [ + 0.466, + 0.226, + 0.55, + 0.241 + ], + "angle": 0, + "content": "Yu-Jhe Li" + }, + { + "type": "text", + "bbox": [ + 0.576, + 0.226, + 0.66, + 0.241 + ], + "angle": 0, + "content": "Kun Wan\\*" + }, + { + "type": "text", + "bbox": [ + 0.688, + 0.226, + 0.803, + 0.241 + ], + "angle": 0, + "content": "Wentian Zhao\\*" + }, + { + "type": "text", + "bbox": [ + 0.384, + 0.261, + 0.613, + 0.277 + ], + "angle": 0, + "content": "\\(^{1}\\)Rutgers University \\(^{2}\\)Adobe Inc." + }, + { + "type": "title", + "bbox": [ + 0.46, + 0.313, + 0.538, + 0.329 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.343, + 0.769, + 0.648 + ], + "angle": 0, + "content": "Recent advances in reinforcement learning (RL)-based post-training have led to notable improvements in large language models (LLMs), particularly in enhancing their reasoning capabilities to handle complex tasks. However, most existing methods treat the training data as a unified whole, overlooking the fact that modern LLM training often involves a mixture of data from diverse distributions—varying in both source and difficulty. This heterogeneity introduces a key challenge: how to adaptively schedule training across distributions to optimize learning efficiency. In this paper, we present a principled curriculum learning framework grounded in the notion of distribution-level learnability. Our core insight is that the magnitude of policy advantages reflects how much a model can still benefit from further training on a given distribution. Based on this, we propose a distribution-level curriculum learning framework for RL-based LLM post-training, which leverages the Upper Confidence Bound (UCB) principle to dynamically adjust sampling probabilities for different distributions. This approach prioritizes distributions with either high average advantage (exploitation) or low sample count (exploration), yielding an adaptive and theoretically grounded training schedule. We instantiate our curriculum learning framework with GRPO as the underlying RL algorithm and demonstrate its effectiveness on logic reasoning datasets with multiple difficulties and sources. 
Our experiments show that our framework significantly improves convergence speed and final performance, highlighting the value of distribution-aware curriculum strategies in LLM post-training. Code: https://github.com/ZhentingWang/DUMP." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.672, + 0.314, + 0.687 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.702, + 0.827, + 0.801 + ], + "angle": 0, + "content": "Reinforcement learning (RL)-based post-training has emerged as a powerful approach for enhancing the capabilities of large language models (LLMs), particularly in areas requiring structured reasoning, multi-step inference, and task-specific generalization [1-4]. By leveraging reward signals derived from task performance, human feedback, or domain-specific metrics, RL provides a flexible alternative to supervised fine-tuning. Unlike imitation-based methods that merely mimic reference outputs, RL-based approaches allow models to optimize directly toward behavioral objectives, making them especially effective for boosting model performance on complex reasoning and agentic tasks." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.805, + 0.828, + 0.906 + ], + "angle": 0, + "content": "While RL-based post-training has become a key technique for enhancing LLM capabilities in reasoning, alignment, and coding, one foundational challenge remains underexplored: how to dynamically schedule training across heterogeneous data distributions. In practice, LLMs are post-trained on datasets drawn from a wide variety of sources—ranging from factual QA to math problems and coding tasks—each differing in knowledge/capability relevance, and learning difficulty [5-7]. This heterogeneity is evident in large-scale post-training datasets such as Tulu 3 [7], where prompts span general dialogue, logic puzzles, STEM problems, and multilingual instructions, with" + }, + { + "type": "footer", + "bbox": [ + 0.172, + 0.923, + 0.228, + 0.938 + ], + "angle": 0, + "content": "Preprint." + } + ], + [ + { + "type": "text", + "bbox": [ + 0.17, + 0.092, + 0.827, + 0.244 + ], + "angle": 0, + "content": "widely varying counts, formats, and alignment objectives. More recently, next-generation post-training pipelines (e.g., Seed-Thinking v1.5 [8]) have shifted toward synthetic data generation with controllable parameters—e.g., configuring logical puzzle difficulty. This allows fine-grained control over the data distribution, making distribution-level curriculum learning both feasible and increasingly important. Despite this, most RL-based pipelines still treat all data distributions equally—uniformly sampling tasks throughout training or relying on static, hand-designed curricula. This static treatment ignores the model's evolving learning needs and underutilizes the training budget. Moreover, it is difficult to handcraft effective curricula when the post-training data comes from multiple distributions lacking clear difficulty labels. As reinforcement learning becomes increasingly used in post-training and training costs continue to rise, a data-driven curriculum mechanism that dynamically prioritizes learnable distributions is not just desirable, but necessary." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.25, + 0.828, + 0.349 + ], + "angle": 0, + "content": "This motivates the need for automated distribution-level curriculum learning: a dynamic strategy that adjusts sampling probabilities across data distributions throughout training. 
While prior work has explored instance-level curricula based on sample difficulty [9], and static/heuristic multi-stage schedules have been applied in LLM post-training [10, 11], little attention has been paid to automated, distribution-level scheduling—especially in the context of RL for capability-oriented post-training. The central challenge lies in identifying signals that reflect the current learnability of each distribution and in designing algorithms that can stably and efficiently leverage these signals to guide sampling." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.354, + 0.828, + 0.617 + ], + "angle": 0, + "content": "In this paper, we present DUMP (Automated Distribution-level cUrriculumM learning for RL-based LLM Post-training), a simple but theoretically grounded approach to address this challenge. Our central insight is that the magnitude of policy advantages—the expected absolute difference between a model's predicted return and its baseline value—serves as a natural proxy for distribution-level learnability. High advantages on specific data distribution indicate underfitting and high potential for improvement on it, while low advantages suggest diminishing returns. Moreover, the statistical reliability of these advantage estimates improves with the number of samples drawn from each distribution. DUMP operationalizes this insight by using bandit-style Upper Confidence Bound (UCB) scores to schedule distribution sampling. It maintains a sliding window of recent advantage magnitudes for each distribution and computes a score that balances exploitation (high advantage) and exploration (low visitation). These scores are normalized via a softmax to form sampling weights, which are then used to generate training batches. Unlike fixed or heuristic curricula, DUMP adapts throughout training based on empirical signals, and can be seamlessly integrated into standard LLM RL pipelines. We instantiate DUMP with GRPO [3], but the method is compatible with any advantage-based RL algorithm. We evaluate DUMP on logic reasoning corpora. Our experiments show that DUMP significantly accelerates convergence and yields stronger performance compared to uniform sampling. Furthermore, we provide theoretical analysis that supports the use of absolute advantages as a surrogate for distribution-level learnability, formalizing its connection to sample efficiency and regret minimization." + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.623, + 0.829, + 0.707 + ], + "angle": 0, + "content": "We summarize our contributions as follows. ① We highlight the underexplored challenge of curriculum learning at the distribution level for RL-based post-training aimed at capability enhancement. ② We propose DUMP, a theoretically grounded framework that leverages advantage-based UCB scores to adaptively guide training over data distributions. ③ We demonstrate DUMP's effectiveness through empirical results and theoretical analysis, showing that it enables faster, more efficient improvement on LLM capabilities." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.721, + 0.312, + 0.737 + ], + "angle": 0, + "content": "2 Background" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.746, + 0.829, + 0.913 + ], + "angle": 0, + "content": "RL-based LLM Post-training. Reinforcement learning (RL) plays a central role in post-training large language models (LLMs), especially for tasks involving reasoning, subjective preference, or long-horizon control. 
The RLHF framework [1, 12-15] laid the foundation by aligning models using reward signals derived from human preferences. Beyond preference alignment, recent RL-based post-training approaches have notably enhanced LLMs' capabilities in complex reasoning tasks, particularly coding and mathematics. For instance, RL post-trained model OpenAI o1 [16], o3 [17, 18], DeepSeek-R1 [4] significantly outperform LLMs without RL post-training such as pre-trained versions of GPT-4o [19] and DeepSeek-V3 [20] on challenging mathematics and coding benchmarks (e.g., AIME [21] and Codeforces [22]). Proximal Policy Optimization (PPO) [23] is widely used in post-training due to its clipped objective, which stabilizes training by preventing large policy updates. PPO remains a strong baseline in many LLM alignment settings. Direct Preference Optimization (DPO) [2] simplifies the pipeline by replacing RL rollouts with a classification-style loss" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.176 + ], + "angle": 0, + "content": "derived from a KL-constrained reward maximization objective. While DPO works well on pairwise preference data, it does not naturally support group-wise or comparative feedback. Group Relative Policy Optimization (GRPO) [3] addresses this limitation by leveraging group-based feedback. For each input prompt \\( x \\), GRPO samples a group of \\( G \\) candidate outputs \\( \\{o_1, \\ldots, o_G\\} \\sim \\pi_{\\mathrm{ref}}(\\cdot | x) \\) from a frozen reference policy \\( \\pi_{\\mathrm{ref}} \\). Each output \\( o_i \\) is assigned a reward \\( r_i \\), and the advantage of \\( o_i \\) is computed by normalizing its reward relative to others in the group:" + }, + { + "type": "equation", + "bbox": [ + 0.388, + 0.185, + 0.826, + 0.218 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{r _ {1} , \\dots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(\\left\\{r _ {1} , \\dots , r _ {G} \\right\\}\\right) + \\epsilon}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.226, + 0.825, + 0.267 + ], + "angle": 0, + "content": "where \\(\\epsilon > 0\\) is a small constant for numerical stability. These normalized advantages capture the relative quality of outputs within the group. 
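As an informal illustration (a minimal sketch, not the authors' implementation), the group-relative advantage of Eq. (1) can be computed as follows for a single group of sampled outputs; the helper name and the example rewards are ours.

```python
import statistics

def group_relative_advantages(rewards, eps=1e-6):
    """Group-normalized advantages in the spirit of Eq. (1).

    `rewards` holds the scalar rewards r_1, ..., r_G of one group of outputs
    sampled for the same prompt; `eps` avoids division by zero when all
    rewards in the group are identical.
    """
    mean = statistics.fmean(rewards)
    std = statistics.pstdev(rewards)  # population std; a sample std also works
    return [(r - mean) / (std + eps) for r in rewards]

# A made-up group of G = 4 rewards (e.g., +2 for a correct answer, -1.5 for a wrong one).
print(group_relative_advantages([2.0, -1.5, 2.0, -1.5]))
# -> roughly [1.0, -1.0, 1.0, -1.0]: better-than-average outputs receive
#    positive advantages, worse-than-average outputs receive negative ones.
```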
The model policy \\(\\pi_{\\theta}\\) is then updated by maximizing the following clipped surrogate objective:" + }, + { + "type": "equation", + "bbox": [ + 0.174, + 0.275, + 0.825, + 0.324 + ], + "angle": 0, + "content": "\\[\n\\mathcal {J} _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {x, \\left\\{o _ {i} \\right\\}} \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\min \\left(\\frac {\\pi_ {\\theta} \\left(o _ {i} \\mid x\\right)}{\\pi_ {\\mathrm {o l d}} \\left(o _ {i} \\mid x\\right)} \\hat {A} _ {i}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} \\left(o _ {i} \\mid x\\right)}{\\pi_ {\\mathrm {o l d}} \\left(o _ {i} \\mid x\\right)}, 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i}\\right) - \\beta \\mathbb {D} _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right) \\right], \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.324, + 0.825, + 0.424 + ], + "angle": 0, + "content": "where \\(\\pi_{\\theta}(o_i|x)\\) is the probability assigned by the current model to output \\(o_i\\), \\(\\pi_{\\mathrm{old}}(o_i|x)\\) is the same under the model from previous step, and \\(\\pi_{\\mathrm{ref}}(o_i|x)\\) is that under the reference model. The first term inside the summation is a clipped policy ratio scaled by \\(\\hat{A}_i\\), similar to PPO [23], which prevents overly large updates. The outer expectation is taken over prompts \\(x\\) and their sampled output groups \\(\\{o_i\\}\\). The second term is a KL divergence penalty that regularizes the updated policy \\(\\pi_{\\theta}\\) to stay close to \\(\\pi_{\\mathrm{ref}}\\), weighted by a hyperparameter \\(\\beta\\). This formulation eliminates the need for an explicit value baseline and stabilizes training by comparing outputs within local groups." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.43, + 0.827, + 0.638 + ], + "angle": 0, + "content": "Curriculum Learning for RL. Curriculum learning [24, 25] organizes training by progressing from easy to hard examples. In RL, curricula often follow task complexity [26-28], or are learned via teacher-student frameworks modeled as partially observable Markov decision process [29, 30]. With the adoption of RL in LLM post-training, curriculum learning has shown potential for improving both training efficiency and model effectiveness. For example, Curri-DPO [9] constructs instance-level curricula by ranking preference pairs based on the score gap between preferred and dispreferred responses, introducing harder pairs gradually during DPO fine-tuning. Kimi k1.5 [10] and LogicRL [11], on the other hand, use manually defined heuristic curricula with fixed training stages, e.g., models are first trained on \"easy\" samples for a pre-specified number of steps, then switched to \"hard\" samples. These strategies rely on static schedules and heuristic difficulty labels, without adapting to the model's learning progress. While these works demonstrate the benefit of curriculum learning in LLM post-training, most existing approaches focus on instance-level difficulty or use static, manually designed strategies. In contrast, automatic curriculum learning at the distribution level, especially in RL-based post-training, remains underexplored. In this paper, we propose DUMP to fill this gap by adaptively scheduling training over distributions using advantage-based learnability signals." 
+ }, + { + "type": "title", + "bbox": [ + 0.172, + 0.652, + 0.273, + 0.667 + ], + "angle": 0, + "content": "3 Method" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.677, + 0.827, + 0.734 + ], + "angle": 0, + "content": "In this section, we introduce DUMP, a distribution-level curriculum learning framework for RL-based LLM post-training. We first introduce expected absolute advantage as a proxy for learnability, and formalize the scheduling problem as a multi-armed bandit. We then describe a UCB-based strategy to guide distribution selection, followed by the full implementation of DUMP." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.748, + 0.55, + 0.763 + ], + "angle": 0, + "content": "3.1 Measuring Learnability via Absolute Advantage" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.77, + 0.825, + 0.827 + ], + "angle": 0, + "content": "We aim to dynamically assess the usefulness of different data distributions during LLM reinforcement learning post-training. Intuitively, a distribution is more useful (or \"learnable\") if the model can gain more from training on its samples. To help understand and measure the learnability of the data samples from different distributions, we provide the following theorem:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.832, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Theorem 3.1 (Expected Advantage Magnitude Reflects Learnability). Given a policy \\(\\pi_{\\theta}\\) and a data distribution \\(d\\), the expected absolute advantage \\(\\mathbb{E}_{x \\sim d} \\left[ \\mathbb{E}_{o_i \\sim \\pi_{\\theta}(\\cdot | x)} \\left[ |\\hat{A}_i| \\right] \\right]\\) serves as a proxy for how much that distribution \\(d\\) can help the model improve, where the distribution \\(d\\) consisting of prompts \\(x \\sim d\\), each prompt has a group of sampled outputs \\(\\{o_1, \\ldots, o_n\\}\\), and \\(\\hat{A}_i\\) denotes the advantage of output \\(o_i\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.827, + 0.286 + ], + "angle": 0, + "content": "The proof can be found in Appendix A. Intuitively, if training on a distribution results in a larger expected advantage magnitude, then that distribution is considered more learnable. The advantage function measures the deviation between an action's predicted value and its actual return; a large advantage—either positive or negative—indicates that the model's current policy is still far from optimal on those samples but has a large potential to improve. A small advantage magnitude does not necessarily imply mastery—it may also occur when a task is too difficult or noisy for the model to learn from effectively, resulting in weak or unstable learning signals. To capture this deviation in both directions, we take the absolute value of the advantage. Without this, positive and negative advantages within a batch may cancel out, masking the true extent of the model's uncertainty or suboptimality. By averaging the absolute advantage over multiple sampled outputs and prompts, we obtain a robust estimate of how much learning signal remains in a given distribution. This expected absolute advantage thus acts as a practical proxy for distribution-level learnability: it reflects how much the model can benefit from training on that distribution. 
It also has the strength of being lightweight to compute in RL pipelines, as advantage estimates are already generated during rollout." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.298, + 0.749, + 0.312 + ], + "angle": 0, + "content": "3.2 Formalizing Distribution-Level Curriculum Learning as Multi-armed Bandit" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.32, + 0.825, + 0.403 + ], + "angle": 0, + "content": "We aim to design a curriculum learning strategy that dynamically allocates training focus across multiple data distributions to maximize overall model improvement. Let \\(\\mathcal{D} = \\{d_1, \\dots, d_N\\}\\) be a set of data distributions. At each training step, we sample a batch of examples \\(\\mathcal{B}_t\\) by drawing prompts from these distributions according to a learnable sampling policy, and use the batch to update model parameters \\(\\theta\\) via reinforcement learning. The goal is to assign higher sampling probabilities to distributions that offer greater learning potential, thereby maximizing cumulative capability gain." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.41, + 0.827, + 0.768 + ], + "angle": 0, + "content": "As motivated in Theorem 3.1, we quantify the learning potential of a distribution \\(d\\) via its expected absolute advantage, defined as \\(L(d) = \\mathbb{E}_{x\\sim d}\\left[\\mathbb{E}_{o\\sim \\pi_{\\theta}(\\cdot |x)}\\left[\\left|\\hat{A}(o)\\right|\\right]\\right]\\). Our objective is to dynamically adjust the sampling distribution over \\(\\mathcal{D}\\) such that, over the training horizon \\(T\\), we approximately maximize the total expected learnability gain \\(\\sum_{t=1}^{T}\\mathbb{E}_{d\\sim P_t}[L(d)]\\), where \\(P_t\\) is the sampling distribution at step \\(t\\). This setup resembles a multi-armed bandit (MAB) problem, where each distribution acts as an arm and its reward corresponds to its learnability. In this setting, the central challenge is to estimate and balance each distribution's potential: exploiting those with high observed advantage while still exploring under-sampled ones that may offer long-term benefit. To this end, we adopt the classic Upper Confidence Bound (UCB) principle [31], which provides theoretical guarantees for balancing exploration and exploitation in bandit problems. Specifically, UCB-based algorithms achieve sublinear regret compared to the optimal fixed-arm strategy, and we show in Appendix B that applying UCB on empirical advantage statistics yields a near-optimal schedule under mild assumptions. To allow smoother allocation of sampling probabilities without hard cutoffs and reducing variance in learning, we adopt a soft-selection mechanism: instead of choosing one distribution at each step, we compute a UCB score for every distribution and normalize the scores with a softmax function to obtain a sampling distribution. This soft-selection formulation preserves the spirit of UCB—higher scoring distributions are sampled more—but enables partial exploration of all arms, and it is easier to integrate into LLM training pipelines. The resulting sampling distribution provides a convex mixture over data sources, where each distribution \\(d_j\\) is selected with probability. Each training batch is then composed by drawing examples from multiple distributions in proportion to their scores. 
To estimate learnability in practice, we maintain a sliding window \\(\\mathcal{A}_{d_j}^w\\) of recent absolute advantages for each distribution \\(d_j\\), and define its empirical reward as the mean absolute advantage: \\(\\hat{L}(d_j) = \\frac{1}{|\\mathcal{A}_{d_j}^w|}\\sum_{a\\in \\mathcal{A}_{d_j}^w}|a|\\). We also track the total number of samples drawn from each distribution \\(n_{d_j}\\), and the global sample count \\(n_{\\mathrm{total}} = \\sum_{j}n_{d_j}\\). The UCB score for each distribution is:" + }, + { + "type": "equation", + "bbox": [ + 0.362, + 0.775, + 0.825, + 0.814 + ], + "angle": 0, + "content": "\\[\n\\mathrm {U C B} \\left(d _ {j}\\right) = \\hat {L} \\left(d _ {j}\\right) + \\sqrt {\\frac {2 \\log \\left(n _ {\\text {t o t a l}} + 1\\right)}{n _ {d _ {j}} + 1}} \\tag {3}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.82, + 0.825, + 0.912 + ], + "angle": 0, + "content": "The first term encourages exploitation of distributions with high observed advantages, while the second term ensures sufficient exploration of rarely sampled distributions. To obtain the final sampling weights, we apply a softmax over the UCB scores. Specifically, the probability of selecting distribution \\( d_{j} \\) is computed as: \\( P(d_{j}) = \\frac{\\exp(\\mathrm{UCB}(d_{j}) / \\tau)}{\\sum_{j=1}^{N} \\exp(\\mathrm{UCB}(d_{j}) / \\tau)} \\), where \\( \\tau > 0 \\) is a temperature hyperparameter that controls the sharpness of the sampling distribution. A lower \\( \\tau \\) results in more peaked selection around the top-scoring distributions, while a higher \\( \\tau \\) leads to a smoother, more exploratory curriculum. This" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.174, + 0.091, + 0.741, + 0.107 + ], + "angle": 0, + "content": "Algorithm 1 Automated Distribution-Level Curriculum Learning with UCB Sampling" + }, + { + "type": "algorithm", + "bbox": [ + 0.174, + 0.108, + 0.826, + 0.521 + ], + "angle": 0, + "content": "Input: Dataset \\(\\mathcal{D} = \\{d_1,\\dots ,d_N\\}\\) ; pre-trained model parameters \\(\\theta\\) \nOutput: Post-trained model parameters \\(\\theta\\) \n1: function DUMP(D, \\(\\theta\\) \n2: Initialize distribution-level statistics \n3: for each \\(d_{j}\\in \\mathcal{D}\\) do \n4: \\(A_{dj}^{w}\\gets []\\) Sliding window for absolute advantages \n5: \\(n_{d_j}\\gets 0\\) Total samples seen from \\(d_{j}\\) \n6: \\(P(d_{j})\\leftarrow \\frac{1}{N}\\) Equal initial weights \n7: for training step \\(t = 1,2,\\ldots ,T\\) do \n8: Sample batch \\(\\mathcal{B}_t\\) from \\(\\mathcal{D}\\) according to \\(P(d_j)\\) \n9: Compute advantages \\(\\hat{A} (o)\\) for all \\(o\\in \\mathcal{B}_t\\) via model rollout \n10: for each \\(d_{j}\\) with samples in \\(\\mathcal{B}_t\\) do \n11: \\(n_{d_j}\\gets n_{d_j} + |\\mathcal{B}_{t,d_j}|\\) Update sample count; \\(\\mathcal{B}_{t,d_j}\\) : subset of batch from \\(d_j\\) \n12: \\(\\mathcal{A}_{dj}^{w}\\gets \\mathcal{A}_{dj}^{w}\\cup \\{\\left|\\hat{A} (o)\\right|\\mid x\\in \\mathcal{B}_{t,d_j},o\\sim \\pi_{\\theta}(\\cdot |x)\\}\\) Append new advantages from \\(d_j\\) \n13: \\(A_{dj}^{w}\\gets A_{dj}^{w}[-k:]\\) k: Window Size; Keep last \\(k\\) elements \n14: Compute UCB scores for each distribution \n15: \\(n_{\\mathrm{total}}\\gets \\sum_{d_j\\in \\mathcal{D}}n_{d_j}\\) \n16: for each \\(d_{j}\\in \\mathcal{D}\\) do \n17: \\(\\hat{L} (d_j)\\gets 
\\frac{1}{|\\mathcal{A}_{dj}^w|}\\sum_{a\\in \\mathcal{A}_{dj}^w}a\\) Mean of absolute advantages \n18: UCB \\((d_j)\\gets \\hat{L} (d_j) + \\sqrt{\\frac{2\\log(n_{\\mathrm{total}} + 1)}{n_{d_j} + 1}}\\) Eq.3 \n19: Update sampling distribution \n20: \\(P(d_j)\\gets \\frac{\\exp(UCB(d_j) / \\tau)}{\\sum_{j = 1}^{N}\\exp(UCB(d_j) / \\tau)}\\quad \\forall d_j\\in \\mathcal{D}\\) △: temperature \n21: Update \\(\\theta\\) using \\(\\mathcal{B}_t\\) with an RL algorithm (e.g., GRPO) \n22: return \\(\\theta\\)" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.548, + 0.825, + 0.605 + ], + "angle": 0, + "content": "bandit-based formulation provides a lightweight, adaptive, and reward-sensitive curriculum learning mechanism. It balances the need to focus on learnable distributions while avoiding premature neglect of underexplored ones. In the next section, we present the complete algorithmic implementation of DUMP, including its integration with rollout procedures and online statistics tracking." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.615, + 0.287, + 0.63 + ], + "angle": 0, + "content": "3.3 Algorithm" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.634, + 0.827, + 0.913 + ], + "angle": 0, + "content": "The detailed curriculum learning procedure is illustrated in Algorithm 1. The algorithm takes as input a dataset \\(\\mathcal{D} = \\{d_1, \\ldots, d_N\\}\\) composed of multiple distributions and returns the optimized model parameters \\(\\theta\\) through a reinforcement learning loop. In lines 3-6, we initialize per-distribution statistics: each distribution \\(d_j \\in \\mathcal{D}\\) is associated with an empty sliding window \\(\\mathcal{A}_{d_j}^w\\) to store recent absolute advantages, a counter \\(n_{d_j}\\) for tracking the number of samples drawn from \\(d_j\\), and an initial sampling probability \\(P(d_j) = \\frac{1}{N}\\) indicating uniform sampling. At each training step \\(t\\) (line 8), a batch \\(\\mathcal{B}_t\\) is sampled according to the current distribution weights \\(P(d_j)\\). Advantages \\(\\hat{A}(o)\\) are then computed via model rollouts for each sampled output \\(o \\in \\mathcal{B}_t\\) (line 9). For every distribution \\(d_j\\) that contributes samples in the current batch, we update its sample count \\(n_{d_j}\\) (line 11), append the corresponding advantages to its sliding window \\(\\mathcal{A}_{d_j}^w\\) (line 12), and truncate the window to retain only the most recent \\(k\\) entries (300 by default) in line 13. This ensures that our estimate of per-distribution learnability remains up-to-date and robust to noise. In lines 15-18, we compute the Upper Confidence Bound (UCB) score \\(\\mathrm{UCB}(d_j)\\) for each distribution. The score consists of two terms: the empirical mean absolute advantage \\(\\hat{L}(d_j)\\) over the sliding window \\(\\mathcal{A}_{d_j}^w\\), and an exploration bonus inversely proportional to the square root of the number of samples \\(n_{d_j}\\). This balances prioritization of distributions that are either highly learnable or underexplored. In line 20, the sampling probabilities \\(P(d_j)\\) are updated by applying a softmax over the UCB scores with a temperature parameter \\(\\tau\\) (0.1 by default). Lower values of \\(\\tau\\) result in sharper distributions that concentrate more heavily on top-ranked distributions, while higher \\(\\tau\\) values induce a smoother, more exploratory curriculum. 
Finally, in line" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.171, + 0.092, + 0.825, + 0.135 + ], + "angle": 0, + "content": "21, the model parameters \\(\\theta\\) are updated using the current batch \\(\\mathcal{B}_t\\) with a reinforcement learning algorithm such as GRPO. After \\(T\\) steps, the algorithm returns the post-trained model \\(\\theta\\), which has been adaptively guided to learn from the most informative distributions." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.148, + 0.416, + 0.166 + ], + "angle": 0, + "content": "4 Experiments and Results" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.172, + 0.828, + 0.229 + ], + "angle": 0, + "content": "In this section, we first introduce our experiments setup including used models datasets and more implementation details. We then demonstrate the results for the effectiveness of our method DUMP. More discussion about the comparison to static heuristic curriculum [11, 10] can be found in Appendix C." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.242, + 0.347, + 0.258 + ], + "angle": 0, + "content": "4.1 Experiments Setup" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.264, + 0.829, + 0.307 + ], + "angle": 0, + "content": "RL Algorithm and LLM Models. We use GRPO [3] as the underlying RL algorithm in our experiments, which is commonly used in capability-oriented LLM post-training [4]. We use Qwen2.5-7B-Instruct-1M [32] and Qwen2.5-3B-Instruct [32] in our experiments." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.313, + 0.827, + 0.369 + ], + "angle": 0, + "content": "Datasets and Settings. Multiple datasets are used in our experiments, including Knights and Knaves (K&K) puzzle dataset [33], RuleTaker [34], ProofWriter [35], AR-LSAT [36], LogiQA [37], LogicNLI [38], LongICLBench [39], GSM-8K [40], and AIME 1983-2024 [21]. In our experiments, we consider three different settings. The prompt template used in shown in Figure 3 in the Appendix." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.375, + 0.827, + 0.472 + ], + "angle": 0, + "content": "Setting 1: Post-training on \\( K \\& K \\) puzzles with varying character numbers. The Knights and Knaves (K&K) dataset [33] contains procedurally generated logic puzzles where each character is either a knight (always truthful) or a knave (always lying), and the goal is to infer each character's identity. The dataset supports fine-grained difficulty control by adjusting the number of characters. We generate puzzles with 3 to 14 characters, treating each character count as a separate distribution—yielding 12 distinct distributions. Each distribution includes 900 training and 100 test samples. We post-train Qwen2.5-7B-Instruct-1M on the combined dataset across all distributions." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.478, + 0.827, + 0.577 + ], + "angle": 0, + "content": "Setting 2: Post-training on diverse logic reasoning distributions. We perform post-training using a mixture of logic reasoning datasets, including RuleTaker [34], ProofWriter [35], AR-LSAT [36], LogiQA [37], LogicNLI [38], LongICLBench Geomotion [39], and Knights and Knaves (K&K) [33]. For RuleTaker, ProofWriter, and K&K, we further partition the data distributions by complexity levels: RuleTaker by 2, 3, and 5 required reasoning steps; ProofWriter by 3, 4, and 5 required reasoning steps; and K&K by the number of characters (3-7). 
In total, we construct 15 logic distributions, each containing 400 training samples. We use Qwen2.5-7B-Instruct-1M for this setting." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.582, + 0.827, + 0.665 + ], + "angle": 0, + "content": "Setting 3: Post-training on diverse math reasoning distributions. We also explore post-training on diverse math data. For AIME, we split the data into four distributions based on competition years—1983–1993, 1994–2004, 2005–2015, and 2016–2024—since problem styles evolve significantly over time. We also include GSM-8K as a complementary math dataset. This results in five math distributions in total, with 7473 (GSM-8K), 124, 194, 283, and 238 training samples, respectively. We use Qwen2.5-3B-Instruct for this setting." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.671, + 0.827, + 0.742 + ], + "angle": 0, + "content": "Reward Implementation. We adopt the rule-based reward mechanism Shao et al. [3] to provide stable and hack-resistant training signals during RL-based post-training and follow the detailed reward implementation in Logic-RL [11]. Specifically, each model response is expected to follow a structured format with the reasoning process enclosed in tags and the final answer enclosed in tags. The reward system consists of two components:" + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.754, + 0.825, + 0.797 + ], + "angle": 0, + "content": "- Format Reward. A binary reward based on whether the output strictly adheres to the expected format. If the model includes exactly one well-formed and one section in the correct order, it receives a reward of +1; otherwise, it receives a penalty of -1." + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.801, + 0.827, + 0.844 + ], + "angle": 0, + "content": "- Answer Reward. We evaluate the correctness of the final answer. If the predicted identities fully match the ground truth, the model receives a reward of \\( +2 \\); if the answer is incorrect, -1.5; and if the answer is missing or unparsable, -2." + }, + { + "type": "list", + "bbox": [ + 0.217, + 0.754, + 0.827, + 0.844 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Other Implementation Details. All experiments are conducted on servers equipped with 8 Nvidia A100 GPUs. Our method is implemented with VeRL [41] LLM Reinforcement Learning framework. We use GRPO [3] as the training algorithm and follow standard practice for actor rollout and optimization. The actor learning rate is set to \\(1e - 6\\), training batch size is set to 128, and the PPO" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.203, + 0.089, + 0.486, + 0.284 + ], + "angle": 0, + "content": "
Data Distribution | without DUMP | with DUMP
RuleTaker 2 Steps | 0.79 | 0.79
RuleTaker 3 Steps | 0.76 | 1.02
RuleTaker 5 Steps | 0.56 | 0.98
ProofWriter 3 Steps | 1.18 | 1.09
ProofWriter 4 Steps | 0.97 | 1.09
ProofWriter 5 Steps | 1.24 | 1.05
AR-LSAT | -0.70 | -0.52
LogiQA | 1.94 | 1.70
LogicNLI | -0.29 | -0.23
LongICLBench Geomotion | 0.54 | 0.25
K & K 3 Characters | 2.00 | 2.00
K & K 4 Characters | 1.54 | 1.76
K & K 5 Characters | 1.53 | 1.84
K & K 6 Characters | 0.83 | 1.42
K & K 7 Characters | 0.56 | 1.02
Average | 0.90 | 1.17
" + }, + { + "type": "table_caption", + "bbox": [ + 0.171, + 0.29, + 0.516, + 0.332 + ], + "angle": 0, + "content": "Table 1: Test Answer Reward (see Section 4.1) on diverse logic reasoning distributions (Setting 2). The model used here is Qwen2.5-7B-Instruct-1M." + }, + { + "type": "table", + "bbox": [ + 0.554, + 0.14, + 0.795, + 0.233 + ], + "angle": 0, + "content": "
Data Distribution | without DUMP | with DUMP
GSM-8K | 1.50 | 1.47
AIME 1983-1993 | -0.76 | -0.39
AIME 1994-2004 | -1.50 | -1.02
AIME 2005-2015 | -0.94 | -0.94
AIME 2016-2024 | -1.27 | -1.27
Average | -0.59 | -0.43
" + }, + { + "type": "table_caption", + "bbox": [ + 0.524, + 0.24, + 0.825, + 0.296 + ], + "angle": 0, + "content": "Table 2: Test Answer Reward (see Section 4.1) on diverse math reasoning distributions (Setting 3). The model used here is Qwen2.5-3B-Instruct." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.34, + 0.825, + 0.41 + ], + "angle": 0, + "content": "mini-batch size is 32. KL divergence regularization is applied to encourage alignment with the reference policy, with a KL loss coefficient of 0.001. Each rollout batch contains 16 responses. If not specified, we allow for a maximum response length of 20480 and 4096 tokens during training for Qwen2.5-7B-Instruct-1M and Qwen2.5-3B-Instruct, respectively. The window size \\( k \\) and the temperature \\( \\tau \\) in our curriculum learning framework is set to 300 and 0.1, respectively." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.42, + 0.378, + 0.433 + ], + "angle": 0, + "content": "4.2 Effectiveness of DUMP" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.439, + 0.827, + 0.634 + ], + "angle": 0, + "content": "Setting 1: Post-training on the combination of K&K puzzle datasets with different number of characters. To evaluate the effectiveness of DUMP in improving post-training efficiency and performance, we compare it against a uniform distribution sampling baseline across 12 distinct data distributions in the K&K puzzle dataset. Each distribution corresponds to a fixed number of characters in the puzzle, ranging from 3 to 14. Figure 1 plots the test answer reward over training steps for each distribution, with and without DUMP. Across all distributions, DUMP consistently outperforms the baseline, achieving faster convergence and higher test performance. The gains are particularly notable in mid- to high-difficulty distributions (e.g., 6 to 12 characters), where uniform sampling tends to struggle due to data underutilization. For example, in the 9-character distribution (Figure 1g), the model trained with DUMP achieves a reward of over 0.5, whereas the baseline remains below 0.0. These results validate the core intuition of DUMP: dynamically adjusting the sampling focus toward high-learnability distributions accelerates policy improvement while avoiding wasted effort on over-saturated or low-signal data. Notably, the improvement is achieved without any curriculum heuristics or manual data ordering—only by observing advantage signals and adapting online." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.639, + 0.825, + 0.737 + ], + "angle": 0, + "content": "Setting 2: Post-training on diverse logic reasoning distributions. We apply DUMP to 15 logic reasoning distributions including subsets of RuleTaker, ProofWriter, and K&K (with varying difficulty levels), as well as datasets such as AR-LSAT, LogiQA, LogicNLI, and LongICLBench. As shown in Table 1, DUMP improves the average test answer reward from 0.90 to 1.17. Notable improvements are observed on complex tasks such as AR-LSAT, where the reward increases from -0.70 to -0.52, and K&K 7 Characters, from 0.56 to 1.02. These results demonstrate that DUMP adaptively prioritizes undertrained but learnable distributions, leading to more efficient capability gains." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.742, + 0.825, + 0.811 + ], + "angle": 0, + "content": "Setting 3: Post-training on diverse math data distributions. We further evaluate DUMP on GSM-8K and different subsets of AIME grouped by competition years. 
As shown in Table 2, DUMP raises the average test answer reward from -0.59 to -0.43, with the most significant gain on AIME 1994-2004, where performance improves from -1.50 to -1.02. These results highlight DUMP's robustness under distribution shifts and data imbalance." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.823, + 0.499, + 0.838 + ], + "angle": 0, + "content": "4.3 Ablation Study on the Sampling Strategy" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.842, + 0.825, + 0.913 + ], + "angle": 0, + "content": "In this section, we ablate the sampling strategy used in DUMP's UCB-based scheduler. As described in Algorithm 1, our method applies soft sampling controlled by a temperature parameter. The greedy variant (temperature \\(= 0\\)) always selects the distribution with the highest UCB score, while our default uses a small temperature (0.1) to enable probabilistic sampling. We conduct experiments under Setting 1, with a maximum training response length of 10240 tokens. After 100 training steps, the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.09, + 0.386, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.207, + 0.331, + 0.22 + ], + "angle": 0, + "content": "(a) 3 Characters" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.09, + 0.603, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.451, + 0.207, + 0.548, + 0.22 + ], + "angle": 0, + "content": "(b) 4 Characters" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.091, + 0.821, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.668, + 0.207, + 0.765, + 0.22 + ], + "angle": 0, + "content": "(c) 5 Characters" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.222, + 0.386, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.338, + 0.331, + 0.351 + ], + "angle": 0, + "content": "(d) 6 Characters" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.222, + 0.603, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.451, + 0.339, + 0.547, + 0.351 + ], + "angle": 0, + "content": "(e) 7 Characters" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.222, + 0.821, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.669, + 0.339, + 0.763, + 0.351 + ], + "angle": 0, + "content": "(f) 8 Characters" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.354, + 0.386, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.47, + 0.331, + 0.483 + ], + "angle": 0, + "content": "(g) 9 Characters" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.354, + 0.603, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.447, + 0.47, + 0.551, + 0.482 + ], + "angle": 0, + "content": "(h) 10 Characters" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.354, + 0.821, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.666, + 0.47, + 0.766, + 0.482 + ], + "angle": 0, + "content": "(i) 11 Characters" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.485, + 0.386, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.232, + 0.602, + 0.333, + 0.614 
+ ], + "angle": 0, + "content": "(j) 12 Characters" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.485, + 0.603, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.447, + 0.602, + 0.551, + 0.613 + ], + "angle": 0, + "content": "(k) 13 Characters" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.485, + 0.821, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.666, + 0.602, + 0.766, + 0.613 + ], + "angle": 0, + "content": "(1) 14 Characters" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.622, + 0.825, + 0.665 + ], + "angle": 0, + "content": "Figure 1: Effectiveness of DUMP on the K&K puzzle dataset mixed with 12 distributions defined by the number of characters in each puzzle (Setting 1). DUMP consistently achieves higher answer reward on test dataset compared to baseline. The model used here is Qwen2.5-7B-Instruct-1M." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.677, + 0.825, + 0.747 + ], + "angle": 0, + "content": "greedy strategy significantly underperforms due to its lack of exploration—it tends to lock onto a single distribution early and fails to adapt. For instance, on the 13- and 14-character K&K tasks, the greedy variant achieves test answer rewards of \\(-0.91\\) and \\(-1.38\\), while soft sampling reaches \\(-0.66\\) and \\(-1.16\\), respectively. These results highlight the importance of maintaining exploration via a non-zero temperature to prevent the scheduler from collapsing onto suboptimal distributions." + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.756, + 0.557, + 0.771 + ], + "angle": 0, + "content": "4.4 Analyzing the Automated Curriculum by DUMP" + }, + { + "type": "text", + "bbox": [ + 0.17, + 0.773, + 0.827, + 0.913 + ], + "angle": 0, + "content": "To understand how DUMP dynamically allocates training effort across data distributions, we analyze the sampling patterns induced by its UCB-based curriculum mechanism. Figure 2 shows the cumulative number of samples drawn from each distribution (3 to 14 characters) over the course of training on K&K puzzles with varying character numbers (Setting 1). We observe a clear curriculum-like progression: distributions corresponding to simpler puzzles (e.g., 3-5 characters) are heavily sampled in the early stages of training, while more complex distributions (e.g., 10-14 characters) are gradually introduced and increasingly prioritized as training progresses. This pattern aligns with the model's evolving capacity—early training favors distributions with high initial advantage magnitudes, and as the model saturates on those, DUMP shifts focus to underexplored but learnable distributions. 
Importantly, this adaptive sampling behavior emerges automatically from empirical advantage signals" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.947 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.177, + 0.09, + 0.387, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.207, + 0.331, + 0.22 + ], + "angle": 0, + "content": "(a) 3 Characters" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.09, + 0.603, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.451, + 0.207, + 0.548, + 0.22 + ], + "angle": 0, + "content": "(b) 4 Characters" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.091, + 0.821, + 0.198 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.668, + 0.207, + 0.765, + 0.22 + ], + "angle": 0, + "content": "(c) 5 Characters" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.222, + 0.387, + 0.331 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.338, + 0.331, + 0.351 + ], + "angle": 0, + "content": "(d) 6 Characters" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.222, + 0.603, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.451, + 0.338, + 0.548, + 0.351 + ], + "angle": 0, + "content": "(e) 7 Characters" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.222, + 0.821, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.668, + 0.338, + 0.764, + 0.351 + ], + "angle": 0, + "content": "(f) 8 Characters" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.353, + 0.387, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.234, + 0.47, + 0.331, + 0.483 + ], + "angle": 0, + "content": "(g) 9 Characters" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.354, + 0.603, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.446, + 0.47, + 0.552, + 0.482 + ], + "angle": 0, + "content": "(h) 10 Characters" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.354, + 0.821, + 0.462 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.666, + 0.47, + 0.766, + 0.482 + ], + "angle": 0, + "content": "(i) 11 Characters" + }, + { + "type": "image", + "bbox": [ + 0.176, + 0.484, + 0.387, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.232, + 0.601, + 0.333, + 0.614 + ], + "angle": 0, + "content": "(j) 12 Characters" + }, + { + "type": "image", + "bbox": [ + 0.393, + 0.484, + 0.603, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.446, + 0.601, + 0.551, + 0.614 + ], + "angle": 0, + "content": "(k) 13 Characters" + }, + { + "type": "image", + "bbox": [ + 0.61, + 0.484, + 0.821, + 0.593 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.666, + 0.601, + 0.766, + 0.613 + ], + "angle": 0, + "content": "(1) 14 Characters" + }, + { + "type": "image_caption", + "bbox": [ + 0.171, + 0.622, + 0.827, + 0.692 + ], + "angle": 0, + "content": "Figure 2: Curriculum (sample counts) induced by DUMP across 12 K&K puzzle distributions with increasing difficulty defined by the number of characters in each puzzle (Setting 1). 
Simpler distributions are automatically prioritized in early training, while more complex ones are progressively emphasized—both in an entirely automated manner—demonstrating automated distribution scheduling." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.716, + 0.825, + 0.759 + ], + "angle": 0, + "content": "without requiring manual specification of curriculum order. These results highlight DUMP's ability to construct an implicit, data-driven curriculum that mirrors traditional easy-to-hard strategies, while remaining responsive to online training dynamics." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.783, + 0.303, + 0.799 + ], + "angle": 0, + "content": "5 Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.815, + 0.827, + 0.913 + ], + "angle": 0, + "content": "In this work, we introduce a distribution-level curriculum learning framework for RL-based posttraining of large language models. DUMP leverages the expected absolute advantage as a learnability signal to adaptively allocate training focus across heterogeneous distributions. By formalizing scheduling as a multi-armed bandit and adopting a UCB-based sampling strategy, DUMP balances exploitation and exploration in a principled way. Experiments demonstrate that DUMP consistently improves convergence and final performance over baselines. These results highlight the value of distribution-aware curriculum learning in LLM RL post-training." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.174, + 0.09, + 0.27, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.114, + 0.826, + 0.17 + ], + "angle": 0, + "content": "[1] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.182, + 0.182, + 0.826, + 0.226 + ], + "angle": 0, + "content": "[2] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.236, + 0.826, + 0.279 + ], + "angle": 0, + "content": "[3] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.29, + 0.826, + 0.333 + ], + "angle": 0, + "content": "[4] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.344, + 0.826, + 0.4 + ], + "angle": 0, + "content": "[5] Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. The flan collection: Designing data and methods for effective instruction tuning. 
In International Conference on Machine Learning, pages 22631-22648. PMLR, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.411, + 0.826, + 0.455 + ], + "angle": 0, + "content": "[6] Harrison Lee, Samrat Phatale, Hassan Mansoor, Thomas Mesnard, Johan Ferret, Kellie Lu, Colton Bishop, Ethan Hall, Victor Carbune, Abhinav Rastogi, et al. Rlaif: Scaling reinforcement learning from human feedback with ai feedback. arXiv e-prints, pages arXiv-2309, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.466, + 0.826, + 0.509 + ], + "angle": 0, + "content": "[7] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, et al. T\\''ulu 3: Pushing frontiers in open language model post-training. arXiv preprint arXiv:2411.15124, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.519, + 0.826, + 0.563 + ], + "angle": 0, + "content": "[8] ByteDance Seed. Seed-thinking-v1.5: Advancing superb reasoning models with reinforcement learning. Technical report, ByteDance, 2025. URL https://github.com/ByteDance-Seed/Seed-Thinking-v1.5." + }, + { + "type": "ref_text", + "bbox": [ + 0.183, + 0.573, + 0.826, + 0.628 + ], + "angle": 0, + "content": "[9] Pulkit Pattnaik, Rishabh Maheshwary, Kelechi Ogueji, Vikas Yadav, and Sathwik Tejaswi Madhusudhan. Enhancing alignment using curriculum learning & ranked preferences. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 12891-12907, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.64, + 0.826, + 0.684 + ], + "angle": 0, + "content": "[10] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.693, + 0.826, + 0.738 + ], + "angle": 0, + "content": "[11] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.748, + 0.826, + 0.791 + ], + "angle": 0, + "content": "[12] Paul F Christiano, Jan Leike, Tom Brown, Miljan Martic, Shane Legg, and Dario Amodei. Deep reinforcement learning from human preferences. Advances in neural information processing systems, 30, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.802, + 0.826, + 0.845 + ], + "angle": 0, + "content": "[13] Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.856, + 0.826, + 0.911 + ], + "angle": 0, + "content": "[14] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022." 
+ }, + { + "type": "list", + "bbox": [ + 0.174, + 0.114, + 0.826, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.828, + 0.135 + ], + "angle": 0, + "content": "[15] Amelia Glaese, Nat McAleese, Maja Trebacz, John Aslanides, Vlad Firoiu, Timo Ewalds, Maribeth Rauh, Laura Weidinger, Martin Chadwick, Phoebe Thacker, et al. Improving alignment of dialogue agents via targeted human judgements. arXiv preprint arXiv:2209.14375, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.144, + 0.828, + 0.174 + ], + "angle": 0, + "content": "[16] OpenAI. Learning to reason with llms. Technical report, OpenAI, 2024. URL https://openai.com/index/learning-to-reason-with-llms/." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.182, + 0.827, + 0.212 + ], + "angle": 0, + "content": "[17] OpenAI. Openai o3-mini. Technical report, OpenAI, 2025. URL https://openai.com/index/openai-o3-mini/." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.221, + 0.825, + 0.264 + ], + "angle": 0, + "content": "[18] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaiev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.273, + 0.827, + 0.316 + ], + "angle": 0, + "content": "[19] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.325, + 0.827, + 0.368 + ], + "angle": 0, + "content": "[20] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.377, + 0.827, + 0.407 + ], + "angle": 0, + "content": "[21] Aime_1983_2024 (revision 6283828), 2025. URL https://huggingface.co/datasets/di-zhang-fdu/AIME_1983_2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.415, + 0.799, + 0.431 + ], + "angle": 0, + "content": "[22] Mikhail Mirzayanov. Codeforces. https://codeforces.com/. Accessed: 2025-04-13." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.44, + 0.825, + 0.47 + ], + "angle": 0, + "content": "[23] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.479, + 0.827, + 0.521 + ], + "angle": 0, + "content": "[24] Yoshua Bengio, Jérôme Louradour, Ronan Collobert, and Jason Weston. Curriculum learning. In Proceedings of the 26th annual international conference on machine learning, pages 41-48, 2009." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.531, + 0.827, + 0.574 + ], + "angle": 0, + "content": "[25] Alex Graves, Marc G Bellemare, Jacob Menick, Remi Munos, and Koray Kavukcuoglu. Automated curriculum learning for neural networks. In international conference on machine learning, pages 1311-1320. Pmlr, 2017." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.583, + 0.825, + 0.627 + ], + "angle": 0, + "content": "[26] Niels Justesen, Ruben Rodriguez Torrado, Philip Bontrager, Ahmed Khalifa, Julian Togelius, and Sebastian Risi. Illuminating generalization in deep reinforcement learning through procedural level generation. arXiv preprint arXiv:1806.10729, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.635, + 0.825, + 0.679 + ], + "angle": 0, + "content": "[27] Rui Wang, Joel Lehman, Jeff Clune, and Kenneth O Stanley. Paired open-ended trailblazer (poet): Endlessly generating increasingly complex and diverse learning environments and their solutions. arXiv preprint arXiv:1901.01753, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.688, + 0.825, + 0.731 + ], + "angle": 0, + "content": "[28] Richard Li, Allan Jabri, Trevor Darrell, and Pulkit Agrawal. Towards practical multi-object manipulation using relational reinforcement learning. In 2020 IEEE international conference on robotics and automation (icra), pages 4051-4058. IEEE, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.74, + 0.827, + 0.769 + ], + "angle": 0, + "content": "[29] Tambet Matiisen, Avital Oliver, Taco Cohen, and John Schulman. Teacher-student curriculum learning. IEEE transactions on neural networks and learning systems, 31(9):3732-3740, 2019." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.779, + 0.825, + 0.822 + ], + "angle": 0, + "content": "[30] Rémy Portelas, Cédric Colas, Katja Hofmann, and Pierre-Yves Oudeyer. Teacher algorithms for curriculum learning of deep rl in continuously parameterized environments. In Conference on Robot Learning, pages 835-853. PMLR, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.173, + 0.831, + 0.825, + 0.86 + ], + "angle": 0, + "content": "[31] Peter Auer, Nicolo Cesa-Bianchi, and Paul Fischer. Finite-time analysis of the multiarmed bandit problem. Machine learning, 47:235-256, 2002." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.87, + 0.825, + 0.911 + ], + "angle": 0, + "content": "[32] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + }, + { + "type": "list", + "bbox": [ + 0.173, + 0.091, + 0.828, + 0.911 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.134 + ], + "angle": 0, + "content": "[33] Chulin Xie, Yangsibo Huang, Chiyuan Zhang, Da Yu, Xinyun Chen, Bill Yuchen Lin, Bo Li, Badih Ghazi, and Ravi Kumar. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.142, + 0.827, + 0.172 + ], + "angle": 0, + "content": "[34] Peter Clark, Oyvind Tafjord, and Kyle Richardson. Transformers as soft reasoners over language. arXiv preprint arXiv:2002.05867, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.18, + 0.827, + 0.21 + ], + "angle": 0, + "content": "[35] Oyvind Tafjord, Bhavana Dalvi Mishra, and Peter Clark. Proofwriter: Generating implications, proofs, and abductive statements over natural language. arXiv preprint arXiv:2012.13048, 2020." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.218, + 0.825, + 0.26 + ], + "angle": 0, + "content": "[36] Wanjun Zhong, Siyuan Wang, Duyu Tang, Zenan Xu, Daya Guo, Jiahai Wang, Jian Yin, Ming Zhou, and Nan Duan. Ar-lsat: Investigating analytical reasoning of text. arXiv preprint arXiv:2104.06598, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.269, + 0.825, + 0.311 + ], + "angle": 0, + "content": "[37] Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. Logiqa: A challenge dataset for machine reading comprehension with logical reasoning. arXiv preprint arXiv:2007.08124, 2020." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.321, + 0.825, + 0.364 + ], + "angle": 0, + "content": "[38] Jidong Tian, Yitian Li, Wenqing Chen, Liqiang Xiao, Hao He, and Yaohui Jin. Diagnosing the first-order logical reasoning ability through logicli. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3738-3747, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.372, + 0.824, + 0.402 + ], + "angle": 0, + "content": "[39] Tianle Li, Ge Zhang, Quy Duc Do, Xiang Yue, and Wenhu Chen. Long-context llms struggle with long in-context learning. arXiv preprint arXiv:2404.02060, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.174, + 0.41, + 0.827, + 0.464 + ], + "angle": 0, + "content": "[40] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.175, + 0.474, + 0.825, + 0.518 + ], + "angle": 0, + "content": "[41] Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024." + }, + { + "type": "list", + "bbox": [ + 0.174, + 0.091, + 0.827, + 0.518 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.173, + 0.09, + 0.396, + 0.106 + ], + "angle": 0, + "content": "A Proof for Theorem 3.1" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.12, + 0.827, + 0.203 + ], + "angle": 0, + "content": "Theorem A.1 (Expected Advantage Magnitude Reflects Learnability). Given a policy \\(\\pi_{\\theta}\\) and a data distribution \\(d\\), the expected absolute advantage \\(\\mathbb{E}_{x\\sim d}\\left[\\mathbb{E}_{o_i\\sim \\pi_\\theta (\\cdot |x)}\\left[|\\hat{A}_i|\\right]\\right]\\) serves as a proxy for how much that distribution \\(d\\) can help the model improve, where the distribution \\(d\\) consisting of prompts \\(x\\sim d\\), each prompt has a group of sampled outputs \\(\\{o_1,\\ldots ,o_n\\}\\), and \\(\\hat{A}_i\\) denotes the advantage of output \\(o_i\\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.216, + 0.825, + 0.246 + ], + "angle": 0, + "content": "Proof. Let \\(\\pi_{\\theta}\\) be the current model policy. Consider a data distribution \\(d\\), where \\(x \\sim d\\) are prompts and \\(\\{o_1, \\ldots, o_n\\} \\sim \\pi_{\\theta}(\\cdot | x)\\) are sampled outputs. 
For each output \\(o_i\\), the advantage is estimated as" + }, + { + "type": "equation", + "bbox": [ + 0.443, + 0.251, + 0.554, + 0.268 + ], + "angle": 0, + "content": "\\[\n\\hat {A} _ {i} = r _ {i} - b (x),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.273, + 0.827, + 0.303 + ], + "angle": 0, + "content": "where \\( r_i \\) is the reward assigned to \\( o_i \\) and \\( b(x) \\) is a baseline (e.g., the mean reward over the group). The policy gradient under common policy-gradient methods (e.g., PPO or GRPO) can be written as:" + }, + { + "type": "equation", + "bbox": [ + 0.308, + 0.307, + 0.686, + 0.334 + ], + "angle": 0, + "content": "\\[\n\\nabla_ {\\theta} \\mathcal {J} (\\theta) = \\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o _ {i} \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ \\hat {A} _ {i} \\cdot \\nabla_ {\\theta} \\log \\pi_ {\\theta} (o _ {i} \\mid x) \\right] \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.344, + 0.825, + 0.373 + ], + "angle": 0, + "content": "Now consider the magnitude of the gradient vector. The strength of the training signal from \\(d\\) depends on the expected norm of the gradient, which is bounded below by:" + }, + { + "type": "equation", + "bbox": [ + 0.289, + 0.378, + 0.707, + 0.406 + ], + "angle": 0, + "content": "\\[\n\\left\\| \\nabla_ {\\theta} \\mathcal {J} (\\theta) \\right\\| \\gtrsim \\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o _ {i} \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ | \\hat {A} _ {i} | \\cdot \\| \\nabla_ {\\theta} \\log \\pi_ {\\theta} (o _ {i} \\mid x) \\| \\right] \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.415, + 0.825, + 0.444 + ], + "angle": 0, + "content": "Assuming that \\(\\| \\nabla_{\\theta}\\log \\pi_{\\theta}(o_i\\mid x)\\|\\) is bounded and varies slowly across \\(d\\), the dominant term affecting the gradient norm is:" + }, + { + "type": "equation", + "bbox": [ + 0.405, + 0.443, + 0.591, + 0.469 + ], + "angle": 0, + "content": "\\[\n\\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o _ {i} \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ | \\hat {A} _ {i} | \\right] \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.477, + 0.825, + 0.52 + ], + "angle": 0, + "content": "Thus, the expected absolute advantage serves as a proxy for the learning signal magnitude contributed by distribution \\( d \\). The expected absolute advantage reflects how much training on distribution \\( d \\) can improve the model parameters, making it a suitable signal for curriculum scheduling." + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.525, + 0.824, + 0.537 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.171, + 0.558, + 0.749, + 0.576 + ], + "angle": 0, + "content": "B Theoretical Justification for UCB-Based Distribution Scheduling" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.589, + 0.825, + 0.632 + ], + "angle": 0, + "content": "We provide a theoretical justification for using Upper Confidence Bound (UCB) as a strategy for scheduling training over data distributions in RL-based post-training. 
Our objective is to maximize the cumulative learnability gain over \\(T\\) training steps, defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.279, + 0.637, + 0.717, + 0.678 + ], + "angle": 0, + "content": "\\[\n\\max _ {\\{d _ {t} \\} _ {t = 1} ^ {T}} \\sum_ {t = 1} ^ {T} L (d _ {t}), \\quad \\text {w h e r e} \\quad L (d) = \\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ | \\hat {A} (o) | \\right] \\right].\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.69, + 0.825, + 0.761 + ], + "angle": 0, + "content": "This setting can be viewed as a stochastic multi-armed bandit (MAB) problem, where each data distribution \\( d_{j} \\in \\mathcal{D} \\) corresponds to an arm with unknown reward \\( L(d_{j}) \\), interpreted as the expected absolute advantage from training on samples from \\( d_{j} \\). At each training step \\( t \\), the learner selects a distribution \\( d_{t} \\) and obtains an empirical reward \\( \\hat{L}(d_{t}) \\) by averaging the absolute advantages observed in the batch." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.769, + 0.825, + 0.797 + ], + "angle": 0, + "content": "We define the regret as the gap between the cumulative learnability gain of the best fixed distribution \\( d^{*} = \\arg \\max_{d}L(d) \\) and that of the learner's actual selections:" + }, + { + "type": "equation", + "bbox": [ + 0.373, + 0.803, + 0.623, + 0.845 + ], + "angle": 0, + "content": "\\[\n\\operatorname {R e g r e t} (T) = \\sum_ {t = 1} ^ {T} L \\left(d ^ {*}\\right) - \\sum_ {t = 1} ^ {T} L \\left(d _ {t}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.856, + 0.564, + 0.872 + ], + "angle": 0, + "content": "To analyze this regret, we make the following assumptions:" + }, + { + "type": "text", + "bbox": [ + 0.211, + 0.883, + 0.825, + 0.914 + ], + "angle": 0, + "content": "1. For each distribution \\(d_{j}\\), the per-output absolute advantages \\(|\\hat{A}(o)|\\), where \\(o \\sim \\pi_{\\theta}(\\cdot|x)\\), are i.i.d. and bounded in \\([0, C]\\) for some constant \\(C > 0\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.209, + 0.092, + 0.825, + 0.121 + ], + "angle": 0, + "content": "2. The true expected advantage \\( L(d_{j}) \\) remains approximately stationary over a local training window, enabling meaningful online adaptation." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.134, + 0.825, + 0.178 + ], + "angle": 0, + "content": "Note: In practice, we can clip or normalize \\( |\\hat{A}(o)| \\) to satisfy the boundedness condition. The introduction of the constant \\( C \\) only scales the regret by a constant factor and does not affect the asymptotic rate of convergence." + }, + { + "type": "text", + "bbox": [ + 0.172, + 0.183, + 0.566, + 0.198 + ], + "angle": 0, + "content": "Under these assumptions, the following regret bound holds:" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.2, + 0.825, + 0.243 + ], + "angle": 0, + "content": "Theorem B.1. Let \\(\\mathcal{D} = \\{d_1, \\ldots, d_N\\}\\) be a set of data distributions with fixed expected rewards \\(L(d_j) \\in [0, C]\\). 
Then, applying the UCB1 algorithm to the empirical reward observations yields the regret bound:" + }, + { + "type": "equation", + "bbox": [ + 0.26, + 0.251, + 0.737, + 0.299 + ], + "angle": 0, + "content": "\\[\nR e g r e t (T) \\leq O \\left(C \\cdot \\sum_ {j: \\Delta_ {j} > 0} \\frac {\\log T}{\\Delta_ {j}}\\right), \\quad w h e r e \\quad \\Delta_ {j} = L \\left(d ^ {*}\\right) - L \\left(d _ {j}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.312, + 0.825, + 0.356 + ], + "angle": 0, + "content": "Proof. This result is a direct application of the classical UCB1 regret bound [31], extended to the case where reward values lie in \\([0, C]\\). Let \\( d^{*} = \\arg \\max_{d} L(d) \\) be the optimal distribution, and let \\( \\Delta_{j} = L(d^{*}) - L(d_{j}) \\) denote the suboptimality gap for each arm \\( d_{j} \\)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.36, + 0.794, + 0.376 + ], + "angle": 0, + "content": "At each time step \\( t \\), UCB1 selects the distribution \\( d_{j} \\) with the highest upper confidence bound:" + }, + { + "type": "equation", + "bbox": [ + 0.382, + 0.383, + 0.617, + 0.423 + ], + "angle": 0, + "content": "\\[\n\\mathbf {U C B} (d _ {j}) = \\hat {L} (d _ {j}) + \\sqrt {\\frac {2 C ^ {2} \\log t}{n _ {j}}},\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.432, + 0.825, + 0.462 + ], + "angle": 0, + "content": "where \\( n_j \\) is the number of times distribution \\( d_j \\) has been sampled so far, and \\( \\hat{L}(d_j) \\) is the empirical mean of observed rewards (mean absolute advantages)." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.467, + 0.827, + 0.524 + ], + "angle": 0, + "content": "Under the assumptions that rewards are i.i.d. and bounded in \\([0, C]\\), the Hoeffding inequality guarantees that with high probability the empirical mean concentrates around the true mean \\(L(d_{j})\\), and the UCB selection mechanism will only pick suboptimal arms a logarithmic number of times. Based on UCB1 regret bound [31], The cumulative regret is therefore bounded by:" + }, + { + "type": "equation", + "bbox": [ + 0.343, + 0.53, + 0.654, + 0.57 + ], + "angle": 0, + "content": "\\[\n\\operatorname {R e g r e t} (T) \\leq \\sum_ {j: \\Delta_ {j} > 0} \\left(\\frac {8 C ^ {2} \\log T}{\\Delta_ {j}} + O (\\Delta_ {j})\\right),\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.576, + 0.492, + 0.591 + ], + "angle": 0, + "content": "which simplifies to the stated asymptotic bound:" + }, + { + "type": "equation", + "bbox": [ + 0.37, + 0.598, + 0.626, + 0.647 + ], + "angle": 0, + "content": "\\[\n\\operatorname {R e g r e t} (T) = O \\left(C \\cdot \\sum_ {j: \\Delta_ {j} > 0} \\frac {\\log T}{\\Delta_ {j}}\\right).\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.681, + 0.825, + 0.739 + ], + "angle": 0, + "content": "This result shows that our distribution-level scheduling strategy, when driven by UCB over empirical advantage rewards, is provably efficient. It dynamically concentrates training on distributions with high estimated learnability while ensuring sufficient exploration, with regret that scales logarithmically in \\( T \\) and linearly in \\( 1 / \\Delta_j \\)." 
+ }, + { + "type": "title", + "bbox": [ + 0.171, + 0.757, + 0.523, + 0.774 + ], + "angle": 0, + "content": "C Comparison to Heuristic Curriculum" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.788, + 0.827, + 0.913 + ], + "angle": 0, + "content": "Heuristic curricula, which manually specify a fixed training schedule over data distributions—e.g., training on Distribution A for N steps before switching to Distribution B—have been explored in prior work [11, 10], particularly in environments where task difficulty or domain progression is well understood. However, such approaches have several limitations that make them less suitable for our setting. First, effective heuristic scheduling requires strong prior knowledge about the relative difficulty and learnability of each distribution. In our setting, which involves diverse domains such as logic reasoning, mathematics, and programming, such prior knowledge is often unavailable or misleading. For example, a distribution may appear \"easier\" but provide low learning signal, or seem \"harder\" but actually yield high gradient utility. This makes it extremely difficult to construct" + }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.194, + 0.096, + 0.307, + 0.108 + ], + "angle": 0, + "content": "Example of Prompt" + }, + { + "type": "text", + "bbox": [ + 0.19, + 0.112, + 0.82, + 0.171 + ], + "angle": 0, + "content": "You are a helpful assistant. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within and tags, respectively, i.e., reasoning process here answer here . Now the user asks you to solve a reasoning problem. After thinking, when you finally reach a conclusion, clearly state the identity of each character within tags. [Problem]" + }, + { + "type": "image_caption", + "bbox": [ + 0.382, + 0.183, + 0.615, + 0.198 + ], + "angle": 0, + "content": "Figure 3: Example of prompt used." + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.224, + 0.825, + 0.323 + ], + "angle": 0, + "content": "robust, generalizable heuristics across tasks. Second, heuristic curricula are static and cannot adapt to the evolving needs of the model during training. In contrast, DUMP dynamically adjusts sampling priorities based on actual model performance—measured via policy advantages—allowing it to focus on the most beneficial distributions at each stage of learning. Finally, the lack of standardized or widely accepted heuristic curricula for our task suite makes it hard to conduct fair and meaningful comparisons. Instead, we benchmark DUMP against uniform sampling and adaptive baselines, which are more reflective of current best practices in large-scale post-training pipelines." + }, + { + "type": "title", + "bbox": [ + 0.172, + 0.341, + 0.308, + 0.357 + ], + "angle": 0, + "content": "D Limitations" + }, + { + "type": "text", + "bbox": [ + 0.171, + 0.372, + 0.825, + 0.442 + ], + "angle": 0, + "content": "First, while the core idea of distribution-level curriculum learning is broadly applicable, we evaluate DUMP only in the context of large language models (LLMs) and do not extend the experiments to multimodal large language models (MLLMs) due to computational constraints. Second, our experiments are limited to 7B-scale models. Scaling our method to larger models remains an important direction for future work." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.491, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "15" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_origin.pdf b/data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..c9430a59f9522c09d0718ae87353b75bdd355553 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/53b04c7d-3ee3-4a55-8055-77e692101b62_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b7af9f6b8ed112ef6e91e90f68fdfde30fa61dea911ecc0bff0d7dab192958ae +size 12132123 diff --git a/data/2025/2504_09xxx/2504.09710/full.md b/data/2025/2504_09xxx/2504.09710/full.md new file mode 100644 index 0000000000000000000000000000000000000000..3d594af8120b15d2467c02e0ba366b34eec31047 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/full.md @@ -0,0 +1,390 @@ +# DUMP: Automated Distribution-Level Curriculum Learning for RL-based LLM Post-training + +Zhenting Wang + +Guofeng Cui + +Yu-Jhe Li + +Kun Wan\* + +Wentian Zhao\* + +$^{1}$ Rutgers University $^{2}$ Adobe Inc. + +# Abstract + +Recent advances in reinforcement learning (RL)-based post-training have led to notable improvements in large language models (LLMs), particularly in enhancing their reasoning capabilities to handle complex tasks. However, most existing methods treat the training data as a unified whole, overlooking the fact that modern LLM training often involves a mixture of data from diverse distributions—varying in both source and difficulty. This heterogeneity introduces a key challenge: how to adaptively schedule training across distributions to optimize learning efficiency. In this paper, we present a principled curriculum learning framework grounded in the notion of distribution-level learnability. Our core insight is that the magnitude of policy advantages reflects how much a model can still benefit from further training on a given distribution. Based on this, we propose a distribution-level curriculum learning framework for RL-based LLM post-training, which leverages the Upper Confidence Bound (UCB) principle to dynamically adjust sampling probabilities for different distributions. This approach prioritizes distributions with either high average advantage (exploitation) or low sample count (exploration), yielding an adaptive and theoretically grounded training schedule. We instantiate our curriculum learning framework with GRPO as the underlying RL algorithm and demonstrate its effectiveness on logic reasoning datasets with multiple difficulties and sources. Our experiments show that our framework significantly improves convergence speed and final performance, highlighting the value of distribution-aware curriculum strategies in LLM post-training. Code: https://github.com/ZhentingWang/DUMP. + +# 1 Introduction + +Reinforcement learning (RL)-based post-training has emerged as a powerful approach for enhancing the capabilities of large language models (LLMs), particularly in areas requiring structured reasoning, multi-step inference, and task-specific generalization [1-4]. By leveraging reward signals derived from task performance, human feedback, or domain-specific metrics, RL provides a flexible alternative to supervised fine-tuning. 
Unlike imitation-based methods that merely mimic reference outputs, RL-based approaches allow models to optimize directly toward behavioral objectives, making them especially effective for boosting model performance on complex reasoning and agentic tasks. + +While RL-based post-training has become a key technique for enhancing LLM capabilities in reasoning, alignment, and coding, one foundational challenge remains underexplored: how to dynamically schedule training across heterogeneous data distributions. In practice, LLMs are post-trained on datasets drawn from a wide variety of sources—ranging from factual QA to math problems and coding tasks—each differing in knowledge/capability relevance, and learning difficulty [5-7]. This heterogeneity is evident in large-scale post-training datasets such as Tulu 3 [7], where prompts span general dialogue, logic puzzles, STEM problems, and multilingual instructions, with + +widely varying counts, formats, and alignment objectives. More recently, next-generation post-training pipelines (e.g., Seed-Thinking v1.5 [8]) have shifted toward synthetic data generation with controllable parameters—e.g., configuring logical puzzle difficulty. This allows fine-grained control over the data distribution, making distribution-level curriculum learning both feasible and increasingly important. Despite this, most RL-based pipelines still treat all data distributions equally—uniformly sampling tasks throughout training or relying on static, hand-designed curricula. This static treatment ignores the model's evolving learning needs and underutilizes the training budget. Moreover, it is difficult to handcraft effective curricula when the post-training data comes from multiple distributions lacking clear difficulty labels. As reinforcement learning becomes increasingly used in post-training and training costs continue to rise, a data-driven curriculum mechanism that dynamically prioritizes learnable distributions is not just desirable, but necessary. + +This motivates the need for automated distribution-level curriculum learning: a dynamic strategy that adjusts sampling probabilities across data distributions throughout training. While prior work has explored instance-level curricula based on sample difficulty [9], and static/heuristic multi-stage schedules have been applied in LLM post-training [10, 11], little attention has been paid to automated, distribution-level scheduling—especially in the context of RL for capability-oriented post-training. The central challenge lies in identifying signals that reflect the current learnability of each distribution and in designing algorithms that can stably and efficiently leverage these signals to guide sampling. + +In this paper, we present DUMP (Automated Distribution-level cUrriculumM learning for RL-based LLM Post-training), a simple but theoretically grounded approach to address this challenge. Our central insight is that the magnitude of policy advantages—the expected absolute difference between a model's predicted return and its baseline value—serves as a natural proxy for distribution-level learnability. High advantages on specific data distribution indicate underfitting and high potential for improvement on it, while low advantages suggest diminishing returns. Moreover, the statistical reliability of these advantage estimates improves with the number of samples drawn from each distribution. DUMP operationalizes this insight by using bandit-style Upper Confidence Bound (UCB) scores to schedule distribution sampling. 
It maintains a sliding window of recent advantage magnitudes for each distribution and computes a score that balances exploitation (high advantage) and exploration (low visitation). These scores are normalized via a softmax to form sampling weights, which are then used to generate training batches. Unlike fixed or heuristic curricula, DUMP adapts throughout training based on empirical signals, and can be seamlessly integrated into standard LLM RL pipelines. We instantiate DUMP with GRPO [3], but the method is compatible with any advantage-based RL algorithm. We evaluate DUMP on logic reasoning corpora. Our experiments show that DUMP significantly accelerates convergence and yields stronger performance compared to uniform sampling. Furthermore, we provide theoretical analysis that supports the use of absolute advantages as a surrogate for distribution-level learnability, formalizing its connection to sample efficiency and regret minimization. + +We summarize our contributions as follows. ① We highlight the underexplored challenge of curriculum learning at the distribution level for RL-based post-training aimed at capability enhancement. ② We propose DUMP, a theoretically grounded framework that leverages advantage-based UCB scores to adaptively guide training over data distributions. ③ We demonstrate DUMP's effectiveness through empirical results and theoretical analysis, showing that it enables faster, more efficient improvement on LLM capabilities. + +# 2 Background + +RL-based LLM Post-training. Reinforcement learning (RL) plays a central role in post-training large language models (LLMs), especially for tasks involving reasoning, subjective preference, or long-horizon control. The RLHF framework [1, 12-15] laid the foundation by aligning models using reward signals derived from human preferences. Beyond preference alignment, recent RL-based post-training approaches have notably enhanced LLMs' capabilities in complex reasoning tasks, particularly coding and mathematics. For instance, RL post-trained model OpenAI o1 [16], o3 [17, 18], DeepSeek-R1 [4] significantly outperform LLMs without RL post-training such as pre-trained versions of GPT-4o [19] and DeepSeek-V3 [20] on challenging mathematics and coding benchmarks (e.g., AIME [21] and Codeforces [22]). Proximal Policy Optimization (PPO) [23] is widely used in post-training due to its clipped objective, which stabilizes training by preventing large policy updates. PPO remains a strong baseline in many LLM alignment settings. Direct Preference Optimization (DPO) [2] simplifies the pipeline by replacing RL rollouts with a classification-style loss + +derived from a KL-constrained reward maximization objective. While DPO works well on pairwise preference data, it does not naturally support group-wise or comparative feedback. Group Relative Policy Optimization (GRPO) [3] addresses this limitation by leveraging group-based feedback. For each input prompt $x$ , GRPO samples a group of $G$ candidate outputs $\{o_1, \ldots, o_G\} \sim \pi_{\mathrm{ref}}(\cdot | x)$ from a frozen reference policy $\pi_{\mathrm{ref}}$ . 
Each output $o_i$ is assigned a reward $r_i$ , and the advantage of $o_i$ is computed by normalizing its reward relative to others in the group: + +$$ +\hat {A} _ {i} = \frac {r _ {i} - \operatorname {m e a n} \left(\left\{r _ {1} , \dots , r _ {G} \right\}\right)}{\operatorname {s t d} \left(\left\{r _ {1} , \dots , r _ {G} \right\}\right) + \epsilon}, \tag {1} +$$ + +where $\epsilon > 0$ is a small constant for numerical stability. These normalized advantages capture the relative quality of outputs within the group. The model policy $\pi_{\theta}$ is then updated by maximizing the following clipped surrogate objective: + +$$ +\mathcal {J} _ {\mathrm {G R P O}} (\theta) = \mathbb {E} _ {x, \left\{o _ {i} \right\}} \left[ \frac {1}{G} \sum_ {i = 1} ^ {G} \min \left(\frac {\pi_ {\theta} \left(o _ {i} \mid x\right)}{\pi_ {\mathrm {o l d}} \left(o _ {i} \mid x\right)} \hat {A} _ {i}, \operatorname {c l i p} \left(\frac {\pi_ {\theta} \left(o _ {i} \mid x\right)}{\pi_ {\mathrm {o l d}} \left(o _ {i} \mid x\right)}, 1 - \epsilon , 1 + \epsilon\right) \hat {A} _ {i}\right) - \beta \mathbb {D} _ {\mathrm {K L}} \left(\pi_ {\theta} \| \pi_ {\text {r e f}}\right) \right], \tag {2} +$$ + +where $\pi_{\theta}(o_i|x)$ is the probability assigned by the current model to output $o_i$ , $\pi_{\mathrm{old}}(o_i|x)$ is the same under the model from previous step, and $\pi_{\mathrm{ref}}(o_i|x)$ is that under the reference model. The first term inside the summation is a clipped policy ratio scaled by $\hat{A}_i$ , similar to PPO [23], which prevents overly large updates. The outer expectation is taken over prompts $x$ and their sampled output groups $\{o_i\}$ . The second term is a KL divergence penalty that regularizes the updated policy $\pi_{\theta}$ to stay close to $\pi_{\mathrm{ref}}$ , weighted by a hyperparameter $\beta$ . This formulation eliminates the need for an explicit value baseline and stabilizes training by comparing outputs within local groups. + +Curriculum Learning for RL. Curriculum learning [24, 25] organizes training by progressing from easy to hard examples. In RL, curricula often follow task complexity [26-28], or are learned via teacher-student frameworks modeled as partially observable Markov decision process [29, 30]. With the adoption of RL in LLM post-training, curriculum learning has shown potential for improving both training efficiency and model effectiveness. For example, Curri-DPO [9] constructs instance-level curricula by ranking preference pairs based on the score gap between preferred and dispreferred responses, introducing harder pairs gradually during DPO fine-tuning. Kimi k1.5 [10] and LogicRL [11], on the other hand, use manually defined heuristic curricula with fixed training stages, e.g., models are first trained on "easy" samples for a pre-specified number of steps, then switched to "hard" samples. These strategies rely on static schedules and heuristic difficulty labels, without adapting to the model's learning progress. While these works demonstrate the benefit of curriculum learning in LLM post-training, most existing approaches focus on instance-level difficulty or use static, manually designed strategies. In contrast, automatic curriculum learning at the distribution level, especially in RL-based post-training, remains underexplored. In this paper, we propose DUMP to fill this gap by adaptively scheduling training over distributions using advantage-based learnability signals. 
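As a concrete illustration of the group-relative advantage in Eq. (1), the following minimal Python sketch (ours, for illustration only; the function name and use of NumPy are assumptions, not the authors' released code) standardizes each reward against its group's mean and standard deviation:

```python
import numpy as np

def group_normalized_advantages(rewards, eps=1e-6):
    """Group-relative advantages as in Eq. (1): standardize each reward
    against the mean and standard deviation of its G-sample group."""
    r = np.asarray(rewards, dtype=float)
    return (r - r.mean()) / (r.std() + eps)

# Example: rewards for 4 sampled outputs of one prompt (illustrative values).
print(group_normalized_advantages([3.0, -0.5, 3.0, -2.5]))
```

Outputs with above-average reward in their group receive positive advantages and below-average ones negative advantages, which the clipped objective in Eq. (2) then scales.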
+ +# 3 Method + +In this section, we introduce DUMP, a distribution-level curriculum learning framework for RL-based LLM post-training. We first introduce expected absolute advantage as a proxy for learnability, and formalize the scheduling problem as a multi-armed bandit. We then describe a UCB-based strategy to guide distribution selection, followed by the full implementation of DUMP. + +# 3.1 Measuring Learnability via Absolute Advantage + +We aim to dynamically assess the usefulness of different data distributions during LLM reinforcement learning post-training. Intuitively, a distribution is more useful (or "learnable") if the model can gain more from training on its samples. To help understand and measure the learnability of the data samples from different distributions, we provide the following theorem: + +Theorem 3.1 (Expected Advantage Magnitude Reflects Learnability). Given a policy $\pi_{\theta}$ and a data distribution $d$ , the expected absolute advantage $\mathbb{E}_{x \sim d} \left[ \mathbb{E}_{o_i \sim \pi_{\theta}(\cdot | x)} \left[ |\hat{A}_i| \right] \right]$ serves as a proxy for how much that distribution $d$ can help the model improve, where the distribution $d$ consisting of prompts $x \sim d$ , each prompt has a group of sampled outputs $\{o_1, \ldots, o_n\}$ , and $\hat{A}_i$ denotes the advantage of output $o_i$ . + +The proof can be found in Appendix A. Intuitively, if training on a distribution results in a larger expected advantage magnitude, then that distribution is considered more learnable. The advantage function measures the deviation between an action's predicted value and its actual return; a large advantage—either positive or negative—indicates that the model's current policy is still far from optimal on those samples but has a large potential to improve. A small advantage magnitude does not necessarily imply mastery—it may also occur when a task is too difficult or noisy for the model to learn from effectively, resulting in weak or unstable learning signals. To capture this deviation in both directions, we take the absolute value of the advantage. Without this, positive and negative advantages within a batch may cancel out, masking the true extent of the model's uncertainty or suboptimality. By averaging the absolute advantage over multiple sampled outputs and prompts, we obtain a robust estimate of how much learning signal remains in a given distribution. This expected absolute advantage thus acts as a practical proxy for distribution-level learnability: it reflects how much the model can benefit from training on that distribution. It also has the strength of being lightweight to compute in RL pipelines, as advantage estimates are already generated during rollout. + +# 3.2 Formalizing Distribution-Level Curriculum Learning as Multi-armed Bandit + +We aim to design a curriculum learning strategy that dynamically allocates training focus across multiple data distributions to maximize overall model improvement. Let $\mathcal{D} = \{d_1, \dots, d_N\}$ be a set of data distributions. At each training step, we sample a batch of examples $\mathcal{B}_t$ by drawing prompts from these distributions according to a learnable sampling policy, and use the batch to update model parameters $\theta$ via reinforcement learning. The goal is to assign higher sampling probabilities to distributions that offer greater learning potential, thereby maximizing cumulative capability gain. 
+ +As motivated in Theorem 3.1, we quantify the learning potential of a distribution $d$ via its expected absolute advantage, defined as $L(d) = \mathbb{E}_{x\sim d}\left[\mathbb{E}_{o\sim \pi_{\theta}(\cdot |x)}\left[\left|\hat{A}(o)\right|\right]\right]$ . Our objective is to dynamically adjust the sampling distribution over $\mathcal{D}$ such that, over the training horizon $T$ , we approximately maximize the total expected learnability gain $\sum_{t=1}^{T}\mathbb{E}_{d\sim P_t}[L(d)]$ , where $P_t$ is the sampling distribution at step $t$ . This setup resembles a multi-armed bandit (MAB) problem, where each distribution acts as an arm and its reward corresponds to its learnability. In this setting, the central challenge is to estimate and balance each distribution's potential: exploiting those with high observed advantage while still exploring under-sampled ones that may offer long-term benefit. To this end, we adopt the classic Upper Confidence Bound (UCB) principle [31], which provides theoretical guarantees for balancing exploration and exploitation in bandit problems. Specifically, UCB-based algorithms achieve sublinear regret compared to the optimal fixed-arm strategy, and we show in Appendix B that applying UCB on empirical advantage statistics yields a near-optimal schedule under mild assumptions. To allow smoother allocation of sampling probabilities without hard cutoffs and reducing variance in learning, we adopt a soft-selection mechanism: instead of choosing one distribution at each step, we compute a UCB score for every distribution and normalize the scores with a softmax function to obtain a sampling distribution. This soft-selection formulation preserves the spirit of UCB—higher scoring distributions are sampled more—but enables partial exploration of all arms, and it is easier to integrate into LLM training pipelines. The resulting sampling distribution provides a convex mixture over data sources, where each distribution $d_j$ is selected with probability. Each training batch is then composed by drawing examples from multiple distributions in proportion to their scores. To estimate learnability in practice, we maintain a sliding window $\mathcal{A}_{d_j}^w$ of recent absolute advantages for each distribution $d_j$ , and define its empirical reward as the mean absolute advantage: $\hat{L}(d_j) = \frac{1}{|\mathcal{A}_{d_j}^w|}\sum_{a\in \mathcal{A}_{d_j}^w}|a|$ . We also track the total number of samples drawn from each distribution $n_{d_j}$ , and the global sample count $n_{\mathrm{total}} = \sum_{j}n_{d_j}$ . The UCB score for each distribution is: + +$$ +\mathrm {U C B} \left(d _ {j}\right) = \hat {L} \left(d _ {j}\right) + \sqrt {\frac {2 \log \left(n _ {\text {t o t a l}} + 1\right)}{n _ {d _ {j}} + 1}} \tag {3} +$$ + +The first term encourages exploitation of distributions with high observed advantages, while the second term ensures sufficient exploration of rarely sampled distributions. To obtain the final sampling weights, we apply a softmax over the UCB scores. Specifically, the probability of selecting distribution $d_{j}$ is computed as: $P(d_{j}) = \frac{\exp(\mathrm{UCB}(d_{j}) / \tau)}{\sum_{j=1}^{N} \exp(\mathrm{UCB}(d_{j}) / \tau)}$ , where $\tau > 0$ is a temperature hyperparameter that controls the sharpness of the sampling distribution. A lower $\tau$ results in more peaked selection around the top-scoring distributions, while a higher $\tau$ leads to a smoother, more exploratory curriculum. 
This + +Algorithm 1 Automated Distribution-Level Curriculum Learning with UCB Sampling +Input: Dataset $\mathcal{D} = \{d_1,\dots ,d_N\}$ ; pre-trained model parameters $\theta$ +Output: Post-trained model parameters $\theta$ +1: function DUMP(D, $\theta$ +2: Initialize distribution-level statistics +3: for each $d_{j}\in \mathcal{D}$ do +4: $A_{dj}^{w}\gets []$ Sliding window for absolute advantages +5: $n_{d_j}\gets 0$ Total samples seen from $d_{j}$ +6: $P(d_{j})\leftarrow \frac{1}{N}$ Equal initial weights +7: for training step $t = 1,2,\ldots ,T$ do +8: Sample batch $\mathcal{B}_t$ from $\mathcal{D}$ according to $P(d_j)$ +9: Compute advantages $\hat{A} (o)$ for all $o\in \mathcal{B}_t$ via model rollout +10: for each $d_{j}$ with samples in $\mathcal{B}_t$ do +11: $n_{d_j}\gets n_{d_j} + |\mathcal{B}_{t,d_j}|$ Update sample count; $\mathcal{B}_{t,d_j}$ : subset of batch from $d_j$ +12: $\mathcal{A}_{dj}^{w}\gets \mathcal{A}_{dj}^{w}\cup \{\left|\hat{A} (o)\right|\mid x\in \mathcal{B}_{t,d_j},o\sim \pi_{\theta}(\cdot |x)\}$ Append new advantages from $d_j$ +13: $A_{dj}^{w}\gets A_{dj}^{w}[-k:]$ k: Window Size; Keep last $k$ elements +14: Compute UCB scores for each distribution +15: $n_{\mathrm{total}}\gets \sum_{d_j\in \mathcal{D}}n_{d_j}$ +16: for each $d_{j}\in \mathcal{D}$ do +17: $\hat{L} (d_j)\gets \frac{1}{|\mathcal{A}_{dj}^w|}\sum_{a\in \mathcal{A}_{dj}^w}a$ Mean of absolute advantages +18: UCB $(d_j)\gets \hat{L} (d_j) + \sqrt{\frac{2\log(n_{\mathrm{total}} + 1)}{n_{d_j} + 1}}$ Eq.3 +19: Update sampling distribution +20: $P(d_j)\gets \frac{\exp(UCB(d_j) / \tau)}{\sum_{j = 1}^{N}\exp(UCB(d_j) / \tau)}\quad \forall d_j\in \mathcal{D}$ △: temperature +21: Update $\theta$ using $\mathcal{B}_t$ with an RL algorithm (e.g., GRPO) +22: return $\theta$ + +bandit-based formulation provides a lightweight, adaptive, and reward-sensitive curriculum learning mechanism. It balances the need to focus on learnable distributions while avoiding premature neglect of underexplored ones. In the next section, we present the complete algorithmic implementation of DUMP, including its integration with rollout procedures and online statistics tracking. + +# 3.3 Algorithm + +The detailed curriculum learning procedure is illustrated in Algorithm 1. The algorithm takes as input a dataset $\mathcal{D} = \{d_1, \ldots, d_N\}$ composed of multiple distributions and returns the optimized model parameters $\theta$ through a reinforcement learning loop. In lines 3-6, we initialize per-distribution statistics: each distribution $d_j \in \mathcal{D}$ is associated with an empty sliding window $\mathcal{A}_{d_j}^w$ to store recent absolute advantages, a counter $n_{d_j}$ for tracking the number of samples drawn from $d_j$ , and an initial sampling probability $P(d_j) = \frac{1}{N}$ indicating uniform sampling. At each training step $t$ (line 8), a batch $\mathcal{B}_t$ is sampled according to the current distribution weights $P(d_j)$ . Advantages $\hat{A}(o)$ are then computed via model rollouts for each sampled output $o \in \mathcal{B}_t$ (line 9). For every distribution $d_j$ that contributes samples in the current batch, we update its sample count $n_{d_j}$ (line 11), append the corresponding advantages to its sliding window $\mathcal{A}_{d_j}^w$ (line 12), and truncate the window to retain only the most recent $k$ entries (300 by default) in line 13. This ensures that our estimate of per-distribution learnability remains up-to-date and robust to noise. 
In lines 15-18, we compute the Upper Confidence Bound (UCB) score $\mathrm{UCB}(d_j)$ for each distribution. The score consists of two terms: the empirical mean absolute advantage $\hat{L}(d_j)$ over the sliding window $\mathcal{A}_{d_j}^w$ , and an exploration bonus inversely proportional to the square root of the number of samples $n_{d_j}$ . This balances prioritization of distributions that are either highly learnable or underexplored. In line 20, the sampling probabilities $P(d_j)$ are updated by applying a softmax over the UCB scores with a temperature parameter $\tau$ (0.1 by default). Lower values of $\tau$ result in sharper distributions that concentrate more heavily on top-ranked distributions, while higher $\tau$ values induce a smoother, more exploratory curriculum. Finally, in line + +21, the model parameters $\theta$ are updated using the current batch $\mathcal{B}_t$ with a reinforcement learning algorithm such as GRPO. After $T$ steps, the algorithm returns the post-trained model $\theta$ , which has been adaptively guided to learn from the most informative distributions. + +# 4 Experiments and Results + +In this section, we first introduce our experiments setup including used models datasets and more implementation details. We then demonstrate the results for the effectiveness of our method DUMP. More discussion about the comparison to static heuristic curriculum [11, 10] can be found in Appendix C. + +# 4.1 Experiments Setup + +RL Algorithm and LLM Models. We use GRPO [3] as the underlying RL algorithm in our experiments, which is commonly used in capability-oriented LLM post-training [4]. We use Qwen2.5-7B-Instruct-1M [32] and Qwen2.5-3B-Instruct [32] in our experiments. + +Datasets and Settings. Multiple datasets are used in our experiments, including Knights and Knaves (K&K) puzzle dataset [33], RuleTaker [34], ProofWriter [35], AR-LSAT [36], LogiQA [37], LogicNLI [38], LongICLBench [39], GSM-8K [40], and AIME 1983-2024 [21]. In our experiments, we consider three different settings. The prompt template used in shown in Figure 3 in the Appendix. + +Setting 1: Post-training on $K \& K$ puzzles with varying character numbers. The Knights and Knaves (K&K) dataset [33] contains procedurally generated logic puzzles where each character is either a knight (always truthful) or a knave (always lying), and the goal is to infer each character's identity. The dataset supports fine-grained difficulty control by adjusting the number of characters. We generate puzzles with 3 to 14 characters, treating each character count as a separate distribution—yielding 12 distinct distributions. Each distribution includes 900 training and 100 test samples. We post-train Qwen2.5-7B-Instruct-1M on the combined dataset across all distributions. + +Setting 2: Post-training on diverse logic reasoning distributions. We perform post-training using a mixture of logic reasoning datasets, including RuleTaker [34], ProofWriter [35], AR-LSAT [36], LogiQA [37], LogicNLI [38], LongICLBench Geomotion [39], and Knights and Knaves (K&K) [33]. For RuleTaker, ProofWriter, and K&K, we further partition the data distributions by complexity levels: RuleTaker by 2, 3, and 5 required reasoning steps; ProofWriter by 3, 4, and 5 required reasoning steps; and K&K by the number of characters (3-7). In total, we construct 15 logic distributions, each containing 400 training samples. We use Qwen2.5-7B-Instruct-1M for this setting. + +Setting 3: Post-training on diverse math reasoning distributions. 
We also explore post-training on diverse math data. For AIME, we split the data into four distributions based on competition years—1983–1993, 1994–2004, 2005–2015, and 2016–2024—since problem styles evolve significantly over time. We also include GSM-8K as a complementary math dataset. This results in five math distributions in total, with 7473 (GSM-8K), 124, 194, 283, and 238 training samples, respectively. We use Qwen2.5-3B-Instruct for this setting. + +Reward Implementation. We adopt the rule-based reward mechanism Shao et al. [3] to provide stable and hack-resistant training signals during RL-based post-training and follow the detailed reward implementation in Logic-RL [11]. Specifically, each model response is expected to follow a structured format with the reasoning process enclosed in tags and the final answer enclosed in tags. The reward system consists of two components: + +- Format Reward. A binary reward based on whether the output strictly adheres to the expected format. If the model includes exactly one well-formed and one section in the correct order, it receives a reward of +1; otherwise, it receives a penalty of -1. +- Answer Reward. We evaluate the correctness of the final answer. If the predicted identities fully match the ground truth, the model receives a reward of $+2$ ; if the answer is incorrect, -1.5; and if the answer is missing or unparsable, -2. + +Other Implementation Details. All experiments are conducted on servers equipped with 8 Nvidia A100 GPUs. Our method is implemented with VeRL [41] LLM Reinforcement Learning framework. We use GRPO [3] as the training algorithm and follow standard practice for actor rollout and optimization. The actor learning rate is set to $1e - 6$ , training batch size is set to 128, and the PPO + +
| Data Distribution | without DUMP | with DUMP |
| --- | --- | --- |
| RuleTaker 2 Steps | 0.79 | 0.79 |
| RuleTaker 3 Steps | 0.76 | 1.02 |
| RuleTaker 5 Steps | 0.56 | 0.98 |
| ProofWriter 3 Steps | 1.18 | 1.09 |
| ProofWriter 4 Steps | 0.97 | 1.09 |
| ProofWriter 5 Steps | 1.24 | 1.05 |
| AR-LSAT | -0.70 | -0.52 |
| LogiQA | 1.94 | 1.70 |
| LogicNLI | -0.29 | -0.23 |
| LongICLBench Geomotion | 0.54 | 0.25 |
| K & K 3 Characters | 2.00 | 2.00 |
| K & K 4 Characters | 1.54 | 1.76 |
| K & K 5 Characters | 1.53 | 1.84 |
| K & K 6 Characters | 0.83 | 1.42 |
| K & K 7 Characters | 0.56 | 1.02 |
| Average | 0.90 | 1.17 |
+ +Table 1: Test Answer Reward (see Section 4.1) on diverse logic reasoning distributions (Setting 2). The model used here is Qwen2.5-7B-Instruct-1M. + +
| Data Distribution | without DUMP | with DUMP |
| --- | --- | --- |
| GSM-8K | 1.50 | 1.47 |
| AIME 1983-1993 | -0.76 | -0.39 |
| AIME 1994-2004 | -1.50 | -1.02 |
| AIME 2005-2015 | -0.94 | -0.94 |
| AIME 2016-2024 | -1.27 | -1.27 |
| Average | -0.59 | -0.43 |
+ +Table 2: Test Answer Reward (see Section 4.1) on diverse math reasoning distributions (Setting 3). The model used here is Qwen2.5-3B-Instruct. + +mini-batch size is 32. KL divergence regularization is applied to encourage alignment with the reference policy, with a KL loss coefficient of 0.001. Each rollout batch contains 16 responses. If not specified, we allow for a maximum response length of 20480 and 4096 tokens during training for Qwen2.5-7B-Instruct-1M and Qwen2.5-3B-Instruct, respectively. The window size $k$ and the temperature $\tau$ in our curriculum learning framework is set to 300 and 0.1, respectively. + +# 4.2 Effectiveness of DUMP + +Setting 1: Post-training on the combination of K&K puzzle datasets with different number of characters. To evaluate the effectiveness of DUMP in improving post-training efficiency and performance, we compare it against a uniform distribution sampling baseline across 12 distinct data distributions in the K&K puzzle dataset. Each distribution corresponds to a fixed number of characters in the puzzle, ranging from 3 to 14. Figure 1 plots the test answer reward over training steps for each distribution, with and without DUMP. Across all distributions, DUMP consistently outperforms the baseline, achieving faster convergence and higher test performance. The gains are particularly notable in mid- to high-difficulty distributions (e.g., 6 to 12 characters), where uniform sampling tends to struggle due to data underutilization. For example, in the 9-character distribution (Figure 1g), the model trained with DUMP achieves a reward of over 0.5, whereas the baseline remains below 0.0. These results validate the core intuition of DUMP: dynamically adjusting the sampling focus toward high-learnability distributions accelerates policy improvement while avoiding wasted effort on over-saturated or low-signal data. Notably, the improvement is achieved without any curriculum heuristics or manual data ordering—only by observing advantage signals and adapting online. + +Setting 2: Post-training on diverse logic reasoning distributions. We apply DUMP to 15 logic reasoning distributions including subsets of RuleTaker, ProofWriter, and K&K (with varying difficulty levels), as well as datasets such as AR-LSAT, LogiQA, LogicNLI, and LongICLBench. As shown in Table 1, DUMP improves the average test answer reward from 0.90 to 1.17. Notable improvements are observed on complex tasks such as AR-LSAT, where the reward increases from -0.70 to -0.52, and K&K 7 Characters, from 0.56 to 1.02. These results demonstrate that DUMP adaptively prioritizes undertrained but learnable distributions, leading to more efficient capability gains. + +Setting 3: Post-training on diverse math data distributions. We further evaluate DUMP on GSM-8K and different subsets of AIME grouped by competition years. As shown in Table 2, DUMP raises the average test answer reward from -0.59 to -0.43, with the most significant gain on AIME 1994-2004, where performance improves from -1.50 to -1.02. These results highlight DUMP's robustness under distribution shifts and data imbalance. + +# 4.3 Ablation Study on the Sampling Strategy + +In this section, we ablate the sampling strategy used in DUMP's UCB-based scheduler. As described in Algorithm 1, our method applies soft sampling controlled by a temperature parameter. The greedy variant (temperature $= 0$ ) always selects the distribution with the highest UCB score, while our default uses a small temperature (0.1) to enable probabilistic sampling. 
We conduct experiments under Setting 1, with a maximum training response length of 10240 tokens. After 100 training steps, the + +![](images/2d84ad10a799a03d621f293c95c9751efb7f544fd948539b15f56498132f463c.jpg) + +![](images/9e9ef5825d12f23ef4294c784c61776bb5627c910310930c8ca5388763eedfff.jpg) + +![](images/b42d5b9d03640e1b2d58067da1a5d77bcd6058d10d8acd393caffffa24d0c095.jpg) + +![](images/b441adacc37480df5405e8d06df557db3f332d10da975d85a6a2fec50e4bcdca.jpg) +(a) 3 Characters + +![](images/dd3c71bd214119d0d2c609783ec05f1acbc266fc90b30589cda8415312677dec.jpg) +(b) 4 Characters + +![](images/90aa2351a348cc35cdc45b61d56ca9e75c94d18c9af73121a1b2516738e49c84.jpg) +(c) 5 Characters + +![](images/7dba6855cb7a7d7cb4cefc1e6b8ce55ec45b710f83cbbc554a09682679d2d825.jpg) +(d) 6 Characters + +![](images/e4805948e0cca244ad9a13762138150a45a99b18afcb9bb2522dc2b2b6595d89.jpg) +(e) 7 Characters + +![](images/93f81e3b516330cf41e06a4aff20afde3f874be24fd483e9ec177162a0b412f2.jpg) +(f) 8 Characters + +![](images/c347b6d78546ea72564dfbc09bebe1fe6fe727ec0282cdd102864b3145b966a8.jpg) +(g) 9 Characters +(j) 12 Characters +Figure 1: Effectiveness of DUMP on the K&K puzzle dataset mixed with 12 distributions defined by the number of characters in each puzzle (Setting 1). DUMP consistently achieves higher answer reward on test dataset compared to baseline. The model used here is Qwen2.5-7B-Instruct-1M. + +![](images/c9c826488d46bd743a05c0379ad5bd3d7978b627c30c7548629b86defd59f338.jpg) +(h) 10 Characters +(k) 13 Characters + +![](images/065491244f9bdf1954390d6968e94ab48062efc337aab6989e5479b161bec53f.jpg) +(i) 11 Characters +(1) 14 Characters + +greedy strategy significantly underperforms due to its lack of exploration—it tends to lock onto a single distribution early and fails to adapt. For instance, on the 13- and 14-character K&K tasks, the greedy variant achieves test answer rewards of $-0.91$ and $-1.38$ , while soft sampling reaches $-0.66$ and $-1.16$ , respectively. These results highlight the importance of maintaining exploration via a non-zero temperature to prevent the scheduler from collapsing onto suboptimal distributions. + +# 4.4 Analyzing the Automated Curriculum by DUMP + +To understand how DUMP dynamically allocates training effort across data distributions, we analyze the sampling patterns induced by its UCB-based curriculum mechanism. Figure 2 shows the cumulative number of samples drawn from each distribution (3 to 14 characters) over the course of training on K&K puzzles with varying character numbers (Setting 1). We observe a clear curriculum-like progression: distributions corresponding to simpler puzzles (e.g., 3-5 characters) are heavily sampled in the early stages of training, while more complex distributions (e.g., 10-14 characters) are gradually introduced and increasingly prioritized as training progresses. This pattern aligns with the model's evolving capacity—early training favors distributions with high initial advantage magnitudes, and as the model saturates on those, DUMP shifts focus to underexplored but learnable distributions. 
Importantly, this adaptive sampling behavior emerges automatically from empirical advantage signals + +![](images/6cc6f36cb334ce3d8187d78cccfbbefe6196ca96877d726115347dfe7c271556.jpg) + +![](images/ed5ee9bc8d30f66370b8860be771fe2239d69990f22f96124e7b422658a99402.jpg) + +![](images/1093ff72a142fb03fb9d135eb344f89dcf33d2036e5144a4d0fbeb8fa8dcf81e.jpg) + +![](images/3e862a18440592fab0f8c83e8df6b59f79b756e68b5338018918ce42306eedfc.jpg) +(a) 3 Characters + +![](images/bd7a606c29b4217534d1180cc89eb34e1ab773d0c9d135da70642a335ccb9d53.jpg) +(b) 4 Characters + +![](images/e28d82a879c78675b5c68c359b5de85522359e36fb32c3bf92a91e0ba286337c.jpg) +(c) 5 Characters + +![](images/dbd799c502135303951605d7513f02860067f2b1e487881b6cb98b637df36867.jpg) +(d) 6 Characters + +![](images/8e0df7b6628cce08d19fc414d2c8e28e73e39228bfd705f4e6fc8edb1552b07e.jpg) +(e) 7 Characters + +![](images/2aeecd41df07eb9cbc6a903e8cdfeb111270dc26d63b455ee2507cce81943392.jpg) +(f) 8 Characters + +![](images/33dda47032ad4c964efebd5b8681521466c3fa0dc248e1d8fc4faf3f6cfeb8ab.jpg) +(g) 9 Characters +(j) 12 Characters +Figure 2: Curriculum (sample counts) induced by DUMP across 12 K&K puzzle distributions with increasing difficulty defined by the number of characters in each puzzle (Setting 1). Simpler distributions are automatically prioritized in early training, while more complex ones are progressively emphasized—both in an entirely automated manner—demonstrating automated distribution scheduling. + +![](images/f410bc2ba3a4e44d2bc0ad93a48a6847bf24a13c070787cb53ce0a6504aa2275.jpg) +(h) 10 Characters +(k) 13 Characters + +![](images/767d20fa7a31ff3d7e8d2f7693228a3d1141ed28620a51d6d9c94e599e75bd3c.jpg) +(i) 11 Characters +(1) 14 Characters + +without requiring manual specification of curriculum order. These results highlight DUMP's ability to construct an implicit, data-driven curriculum that mirrors traditional easy-to-hard strategies, while remaining responsive to online training dynamics. + +# 5 Conclusion + +In this work, we introduce a distribution-level curriculum learning framework for RL-based posttraining of large language models. DUMP leverages the expected absolute advantage as a learnability signal to adaptively allocate training focus across heterogeneous distributions. By formalizing scheduling as a multi-armed bandit and adopting a UCB-based sampling strategy, DUMP balances exploitation and exploration in a principled way. Experiments demonstrate that DUMP consistently improves convergence and final performance over baselines. These results highlight the value of distribution-aware curriculum learning in LLM RL post-training. + +# References + +[1] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022. +[2] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023. +[3] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024. 
+[4] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025. +[5] Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. The flan collection: Designing data and methods for effective instruction tuning. In International Conference on Machine Learning, pages 22631-22648. PMLR, 2023. +[6] Harrison Lee, Samrat Phatale, Hassan Mansoor, Thomas Mesnard, Johan Ferret, Kellie Lu, Colton Bishop, Ethan Hall, Victor Carbune, Abhinav Rastogi, et al. Rlaif: Scaling reinforcement learning from human feedback with ai feedback. arXiv e-prints, pages arXiv-2309, 2023. +[7] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, et al. T\''ulu 3: Pushing frontiers in open language model post-training. arXiv preprint arXiv:2411.15124, 2024. +[8] ByteDance Seed. Seed-thinking-v1.5: Advancing superb reasoning models with reinforcement learning. Technical report, ByteDance, 2025. URL https://github.com/ByteDance-Seed/Seed-Thinking-v1.5. +[9] Pulkit Pattnaik, Rishabh Maheshwary, Kelechi Ogueji, Vikas Yadav, and Sathwik Tejaswi Madhusudhan. Enhancing alignment using curriculum learning & ranked preferences. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 12891-12907, 2024. +[10] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025. +[11] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, 2025. +[12] Paul F Christiano, Jan Leike, Tom Brown, Miljan Martic, Shane Legg, and Dario Amodei. Deep reinforcement learning from human preferences. Advances in neural information processing systems, 30, 2017. +[13] Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593, 2019. +[14] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022. + +[15] Amelia Glaese, Nat McAleese, Maja Trebacz, John Aslanides, Vlad Firoiu, Timo Ewalds, Maribeth Rauh, Laura Weidinger, Martin Chadwick, Phoebe Thacker, et al. Improving alignment of dialogue agents via targeted human judgements. arXiv preprint arXiv:2209.14375, 2022. +[16] OpenAI. Learning to reason with llms. Technical report, OpenAI, 2024. URL https://openai.com/index/learning-to-reason-with-llms/. +[17] OpenAI. Openai o3-mini. Technical report, OpenAI, 2025. URL https://openai.com/index/openai-o3-mini/. +[18] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaiev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025. 
+[19] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024. +[20] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024. +[21] Aime_1983_2024 (revision 6283828), 2025. URL https://huggingface.co/datasets/di-zhang-fdu/AIME_1983_2024. +[22] Mikhail Mirzayanov. Codeforces. https://codeforces.com/. Accessed: 2025-04-13. +[23] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017. +[24] Yoshua Bengio, Jérôme Louradour, Ronan Collobert, and Jason Weston. Curriculum learning. In Proceedings of the 26th annual international conference on machine learning, pages 41-48, 2009. +[25] Alex Graves, Marc G Bellemare, Jacob Menick, Remi Munos, and Koray Kavukcuoglu. Automated curriculum learning for neural networks. In international conference on machine learning, pages 1311-1320. Pmlr, 2017. +[26] Niels Justesen, Ruben Rodriguez Torrado, Philip Bontrager, Ahmed Khalifa, Julian Togelius, and Sebastian Risi. Illuminating generalization in deep reinforcement learning through procedural level generation. arXiv preprint arXiv:1806.10729, 2018. +[27] Rui Wang, Joel Lehman, Jeff Clune, and Kenneth O Stanley. Paired open-ended trailblazer (poet): Endlessly generating increasingly complex and diverse learning environments and their solutions. arXiv preprint arXiv:1901.01753, 2019. +[28] Richard Li, Allan Jabri, Trevor Darrell, and Pulkit Agrawal. Towards practical multi-object manipulation using relational reinforcement learning. In 2020 IEEE international conference on robotics and automation (icra), pages 4051-4058. IEEE, 2020. +[29] Tambet Matiisen, Avital Oliver, Taco Cohen, and John Schulman. Teacher-student curriculum learning. IEEE transactions on neural networks and learning systems, 31(9):3732-3740, 2019. +[30] Rémy Portelas, Cédric Colas, Katja Hofmann, and Pierre-Yves Oudeyer. Teacher algorithms for curriculum learning of deep rl in continuously parameterized environments. In Conference on Robot Learning, pages 835-853. PMLR, 2020. +[31] Peter Auer, Nicolo Cesa-Bianchi, and Paul Fischer. Finite-time analysis of the multiarmed bandit problem. Machine learning, 47:235-256, 2002. +[32] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024. + +[33] Chulin Xie, Yangsibo Huang, Chiyuan Zhang, Da Yu, Xinyun Chen, Bill Yuchen Lin, Bo Li, Badih Ghazi, and Ravi Kumar. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024. +[34] Peter Clark, Oyvind Tafjord, and Kyle Richardson. Transformers as soft reasoners over language. arXiv preprint arXiv:2002.05867, 2020. +[35] Oyvind Tafjord, Bhavana Dalvi Mishra, and Peter Clark. Proofwriter: Generating implications, proofs, and abductive statements over natural language. arXiv preprint arXiv:2012.13048, 2020. +[36] Wanjun Zhong, Siyuan Wang, Duyu Tang, Zenan Xu, Daya Guo, Jiahai Wang, Jian Yin, Ming Zhou, and Nan Duan. Ar-lsat: Investigating analytical reasoning of text. arXiv preprint arXiv:2104.06598, 2021. +[37] Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. 
Logiqa: A challenge dataset for machine reading comprehension with logical reasoning. arXiv preprint arXiv:2007.08124, 2020. +[38] Jidong Tian, Yitian Li, Wenqing Chen, Liqiang Xiao, Hao He, and Yaohui Jin. Diagnosing the first-order logical reasoning ability through logicli. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3738-3747, 2021. +[39] Tianle Li, Ge Zhang, Quy Duc Do, Xiang Yue, and Wenhu Chen. Long-context llms struggle with long in-context learning. arXiv preprint arXiv:2404.02060, 2024. +[40] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021. +[41] Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024. + +# A Proof for Theorem 3.1 + +Theorem A.1 (Expected Advantage Magnitude Reflects Learnability). Given a policy $\pi_{\theta}$ and a data distribution $d$ , the expected absolute advantage $\mathbb{E}_{x\sim d}\left[\mathbb{E}_{o_i\sim \pi_\theta (\cdot |x)}\left[|\hat{A}_i|\right]\right]$ serves as a proxy for how much that distribution $d$ can help the model improve, where the distribution $d$ consisting of prompts $x\sim d$ , each prompt has a group of sampled outputs $\{o_1,\ldots ,o_n\}$ , and $\hat{A}_i$ denotes the advantage of output $o_i$ . + +Proof. Let $\pi_{\theta}$ be the current model policy. Consider a data distribution $d$ , where $x \sim d$ are prompts and $\{o_1, \ldots, o_n\} \sim \pi_{\theta}(\cdot | x)$ are sampled outputs. For each output $o_i$ , the advantage is estimated as + +$$ +\hat {A} _ {i} = r _ {i} - b (x), +$$ + +where $r_i$ is the reward assigned to $o_i$ and $b(x)$ is a baseline (e.g., the mean reward over the group). The policy gradient under common policy-gradient methods (e.g., PPO or GRPO) can be written as: + +$$ +\nabla_ {\theta} \mathcal {J} (\theta) = \mathbb {E} _ {x \sim d} \left[ \mathbb {E} _ {o _ {i} \sim \pi_ {\theta} (\cdot | x)} \left[ \hat {A} _ {i} \cdot \nabla_ {\theta} \log \pi_ {\theta} (o _ {i} \mid x) \right] \right]. +$$ + +Now consider the magnitude of the gradient vector. The strength of the training signal from $d$ depends on the expected norm of the gradient, which is bounded below by: + +$$ +\left\| \nabla_ {\theta} \mathcal {J} (\theta) \right\| \gtrsim \mathbb {E} _ {x \sim d} \left[ \mathbb {E} _ {o _ {i} \sim \pi_ {\theta} (\cdot | x)} \left[ | \hat {A} _ {i} | \cdot \| \nabla_ {\theta} \log \pi_ {\theta} (o _ {i} \mid x) \| \right] \right]. +$$ + +Assuming that $\| \nabla_{\theta}\log \pi_{\theta}(o_i\mid x)\|$ is bounded and varies slowly across $d$ , the dominant term affecting the gradient norm is: + +$$ +\mathbb {E} _ {x \sim d} \left[ \mathbb {E} _ {o _ {i} \sim \pi_ {\theta} (\cdot | x)} \left[ | \hat {A} _ {i} | \right] \right]. +$$ + +Thus, the expected absolute advantage serves as a proxy for the learning signal magnitude contributed by distribution $d$ . The expected absolute advantage reflects how much training on distribution $d$ can improve the model parameters, making it a suitable signal for curriculum scheduling. 
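As an illustration of how this proxy can be estimated in practice, the sketch below (ours; function and variable names are illustrative, not the authors' implementation) averages absolute group-normalized advantages over a batch of prompts drawn from a single distribution, which is the statistic DUMP keeps in its per-distribution sliding windows:

```python
import numpy as np

def empirical_learnability(reward_groups, eps=1e-6):
    """Estimate L(d) = E[|A_hat|] for one data distribution.

    reward_groups: one list of rewards per prompt sampled from d
    (G rollouts per prompt, as in GRPO)."""
    abs_adv = []
    for rewards in reward_groups:
        r = np.asarray(rewards, dtype=float)
        adv = (r - r.mean()) / (r.std() + eps)  # group-relative advantage, Eq. (1)
        abs_adv.extend(np.abs(adv))
    return float(np.mean(abs_adv))

# Example: two prompts from one distribution, 4 rollouts each (illustrative rewards).
print(empirical_learnability([[3.0, -0.5, -0.5, 3.0], [-2.5, -0.5, -0.5, -0.5]]))
```

A distribution whose groups still mix high- and low-reward outputs yields a large estimate, while a distribution on which the model has saturated (near-uniform rewards within each group) yields a small one.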
+ +![](images/ed7c1f9d1fcdd8ad872637f61821fac0d86b9eb068d8a228df80f01f4ae8e37b.jpg) + +# B Theoretical Justification for UCB-Based Distribution Scheduling + +We provide a theoretical justification for using Upper Confidence Bound (UCB) as a strategy for scheduling training over data distributions in RL-based post-training. Our objective is to maximize the cumulative learnability gain over $T$ training steps, defined as: + +$$ +\max _ {\{d _ {t} \} _ {t = 1} ^ {T}} \sum_ {t = 1} ^ {T} L (d _ {t}), \quad \text {w h e r e} \quad L (d) = \mathbb {E} _ {x \sim d} \left[ \mathbb {E} _ {o \sim \pi_ {\theta} (\cdot | x)} \left[ | \hat {A} (o) | \right] \right]. +$$ + +This setting can be viewed as a stochastic multi-armed bandit (MAB) problem, where each data distribution $d_{j} \in \mathcal{D}$ corresponds to an arm with unknown reward $L(d_{j})$ , interpreted as the expected absolute advantage from training on samples from $d_{j}$ . At each training step $t$ , the learner selects a distribution $d_{t}$ and obtains an empirical reward $\hat{L}(d_{t})$ by averaging the absolute advantages observed in the batch. + +We define the regret as the gap between the cumulative learnability gain of the best fixed distribution $d^{*} = \arg \max_{d}L(d)$ and that of the learner's actual selections: + +$$ +\operatorname {R e g r e t} (T) = \sum_ {t = 1} ^ {T} L \left(d ^ {*}\right) - \sum_ {t = 1} ^ {T} L \left(d _ {t}\right). +$$ + +To analyze this regret, we make the following assumptions: + +1. For each distribution $d_{j}$ , the per-output absolute advantages $|\hat{A}(o)|$ , where $o \sim \pi_{\theta}(\cdot|x)$ , are i.i.d. and bounded in $[0, C]$ for some constant $C > 0$ . + +2. The true expected advantage $L(d_{j})$ remains approximately stationary over a local training window, enabling meaningful online adaptation. + +Note: In practice, we can clip or normalize $|\hat{A}(o)|$ to satisfy the boundedness condition. The introduction of the constant $C$ only scales the regret by a constant factor and does not affect the asymptotic rate of convergence. + +Under these assumptions, the following regret bound holds: + +Theorem B.1. Let $\mathcal{D} = \{d_1, \ldots, d_N\}$ be a set of data distributions with fixed expected rewards $L(d_j) \in [0, C]$ . Then, applying the UCB1 algorithm to the empirical reward observations yields the regret bound: + +$$ +R e g r e t (T) \leq O \left(C \cdot \sum_ {j: \Delta_ {j} > 0} \frac {\log T}{\Delta_ {j}}\right), \quad w h e r e \quad \Delta_ {j} = L \left(d ^ {*}\right) - L \left(d _ {j}\right). +$$ + +Proof. This result is a direct application of the classical UCB1 regret bound [31], extended to the case where reward values lie in $[0, C]$ . Let $d^{*} = \arg \max_{d} L(d)$ be the optimal distribution, and let $\Delta_{j} = L(d^{*}) - L(d_{j})$ denote the suboptimality gap for each arm $d_{j}$ . + +At each time step $t$ , UCB1 selects the distribution $d_{j}$ with the highest upper confidence bound: + +$$ +\mathbf {U C B} (d _ {j}) = \hat {L} (d _ {j}) + \sqrt {\frac {2 C ^ {2} \log t}{n _ {j}}}, +$$ + +where $n_j$ is the number of times distribution $d_j$ has been sampled so far, and $\hat{L}(d_j)$ is the empirical mean of observed rewards (mean absolute advantages). + +Under the assumptions that rewards are i.i.d. and bounded in $[0, C]$ , the Hoeffding inequality guarantees that with high probability the empirical mean concentrates around the true mean $L(d_{j})$ , and the UCB selection mechanism will only pick suboptimal arms a logarithmic number of times. 
Based on the UCB1 regret bound [31], the cumulative regret is therefore bounded by: + +$$ +\operatorname{Regret}(T) \leq \sum_{j: \Delta_{j} > 0} \left(\frac{8 C^{2} \log T}{\Delta_{j}} + O(\Delta_{j})\right), +$$ + +which simplifies to the stated asymptotic bound: + +$$ +\operatorname{Regret}(T) = O\left(C \cdot \sum_{j: \Delta_{j} > 0} \frac{\log T}{\Delta_{j}}\right). +$$ + +This result shows that our distribution-level scheduling strategy, when driven by UCB over empirical advantage rewards, is provably efficient. It dynamically concentrates training on distributions with high estimated learnability while ensuring sufficient exploration, with regret that scales logarithmically in $T$ and linearly in $1/\Delta_j$. + +# C Comparison to Heuristic Curriculum + +Heuristic curricula, which manually specify a fixed training schedule over data distributions—e.g., training on Distribution A for N steps before switching to Distribution B—have been explored in prior work [11, 10], particularly in environments where task difficulty or domain progression is well understood. However, such approaches have several limitations that make them less suitable for our setting. First, effective heuristic scheduling requires strong prior knowledge about the relative difficulty and learnability of each distribution. In our setting, which involves diverse domains such as logic reasoning, mathematics, and programming, such prior knowledge is often unavailable or misleading. For example, a distribution may appear "easier" but provide low learning signal, or seem "harder" but actually yield high gradient utility. This makes it extremely difficult to construct robust, generalizable heuristics across tasks. Second, heuristic curricula are static and cannot adapt to the evolving needs of the model during training. In contrast, DUMP dynamically adjusts sampling priorities based on actual model performance—measured via policy advantages—allowing it to focus on the most beneficial distributions at each stage of learning. Finally, the lack of standardized or widely accepted heuristic curricula for our task suite makes it hard to conduct fair and meaningful comparisons. Instead, we benchmark DUMP against uniform sampling and adaptive baselines, which are more reflective of current best practices in large-scale post-training pipelines. + +# Example of Prompt + +You are a helpful assistant. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within <think> </think> and <answer> </answer> tags, respectively, i.e., <think> reasoning process here </think> <answer> answer here </answer>. Now the user asks you to solve a reasoning problem. After thinking, when you finally reach a conclusion, clearly state the identity of each character within <answer> </answer> tags. [Problem] + +Figure 3: Example of prompt used. + +# D Limitations + +First, while the core idea of distribution-level curriculum learning is broadly applicable, we evaluate DUMP only in the context of large language models (LLMs) and do not extend the experiments to multimodal large language models (MLLMs) due to computational constraints. Second, our experiments are limited to 7B-scale models. Scaling our method to larger models remains an important direction for future work.
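To make the scheduling analyzed in Appendix B concrete, the following is a minimal, hypothetical sketch (not the released DUMP implementation; the class name, window size, temperature, and bound C are illustrative assumptions) of a UCB-style scheduler that keeps a sliding window of recent absolute advantages per distribution and converts the resulting scores into softmax sampling weights, in the spirit of the soft-selection mechanism described in the method section:

```python
import math
import random
from collections import defaultdict, deque

class UCBDistributionScheduler:
    """Sketch: UCB1-style scoring over data distributions with soft selection."""

    def __init__(self, dist_names, window=512, C=1.0, temperature=1.0):
        self.window = {d: deque(maxlen=window) for d in dist_names}  # recent |A_hat| per distribution
        self.counts = defaultdict(int)   # n_{d_j}: samples drawn so far per distribution
        self.C = C                       # assumed bound on |A_hat| after clipping/normalization
        self.temperature = temperature   # softmax temperature for soft selection

    def update(self, dist, abs_advantages):
        """Record the absolute advantages observed for a rollout batch from `dist`."""
        self.window[dist].extend(abs_advantages)
        self.counts[dist] += len(abs_advantages)

    def _ucb(self, dist):
        n_j = max(self.counts[dist], 1)
        n_total = max(sum(self.counts.values()), 1)
        mean = sum(self.window[dist]) / max(len(self.window[dist]), 1)  # empirical L_hat(d_j)
        bonus = math.sqrt(2.0 * self.C ** 2 * math.log(n_total) / n_j)  # exploration term
        return mean + bonus

    def sampling_weights(self):
        """Softmax-normalize UCB scores into a sampling distribution over arms."""
        scores = {d: self._ucb(d) / self.temperature for d in self.window}
        m = max(scores.values())
        exps = {d: math.exp(s - m) for d, s in scores.items()}
        z = sum(exps.values())
        return {d: e / z for d, e in exps.items()}

    def sample_batch_sources(self, batch_size):
        """Draw the source distribution for each prompt slot in the next batch."""
        weights = self.sampling_weights()
        dists, probs = zip(*weights.items())
        return random.choices(dists, weights=probs, k=batch_size)
```

In a training loop, one would call `update(dist, abs_advantages)` after scoring each rollout batch and `sample_batch_sources(batch_size)` when composing the next batch, so that distributions with high recent advantage magnitude or few samples are drawn more often.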
\ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09710/images/065491244f9bdf1954390d6968e94ab48062efc337aab6989e5479b161bec53f.jpg b/data/2025/2504_09xxx/2504.09710/images/065491244f9bdf1954390d6968e94ab48062efc337aab6989e5479b161bec53f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9938fea5225fe428e829f5a98f8b7b8a7c5dc376 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/065491244f9bdf1954390d6968e94ab48062efc337aab6989e5479b161bec53f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f04455f73b0eb7da515d71734b4cb89f02d8e1a485415d1fde369e8616842d10 +size 12122 diff --git a/data/2025/2504_09xxx/2504.09710/images/0f49106ee4483579e10efda00fc1881b3a87c0a27aad840d46e59cb4e38f969f.jpg b/data/2025/2504_09xxx/2504.09710/images/0f49106ee4483579e10efda00fc1881b3a87c0a27aad840d46e59cb4e38f969f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f4d4ab635e5aee891f125b9a4438c123c3288518 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/0f49106ee4483579e10efda00fc1881b3a87c0a27aad840d46e59cb4e38f969f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:108416a7a692c56b4bee786ded20cd5133575d74d5bd0733f7763618f00e8f5e +size 4034 diff --git a/data/2025/2504_09xxx/2504.09710/images/1093ff72a142fb03fb9d135eb344f89dcf33d2036e5144a4d0fbeb8fa8dcf81e.jpg b/data/2025/2504_09xxx/2504.09710/images/1093ff72a142fb03fb9d135eb344f89dcf33d2036e5144a4d0fbeb8fa8dcf81e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f8c8388297d388c3a940ac7e68739e1d679031d8 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/1093ff72a142fb03fb9d135eb344f89dcf33d2036e5144a4d0fbeb8fa8dcf81e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d5dd393a415ca611a964a4f692479a543f30d478449423bdfec82bfd6588065d +size 9452 diff --git a/data/2025/2504_09xxx/2504.09710/images/1226dd8c5b516281dc668f8a76af77e66716029ddc4798fe57b30d5decfe1308.jpg b/data/2025/2504_09xxx/2504.09710/images/1226dd8c5b516281dc668f8a76af77e66716029ddc4798fe57b30d5decfe1308.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e7a6cc07601dcdd9ea4fbc3cb441e8314d3e7bb8 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/1226dd8c5b516281dc668f8a76af77e66716029ddc4798fe57b30d5decfe1308.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b913b1f196ca9b2331a3b5153b40ca33c0cb18dd873386e3cd5dd5eb5c135487 +size 9891 diff --git a/data/2025/2504_09xxx/2504.09710/images/2aeecd41df07eb9cbc6a903e8cdfeb111270dc26d63b455ee2507cce81943392.jpg b/data/2025/2504_09xxx/2504.09710/images/2aeecd41df07eb9cbc6a903e8cdfeb111270dc26d63b455ee2507cce81943392.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb96ec996f1b30e878d60c4a104f95684d5419e8 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/2aeecd41df07eb9cbc6a903e8cdfeb111270dc26d63b455ee2507cce81943392.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c81985ec9f8e25bf00555d619dbdf4b66b1446a536e9d9d855af62db6d8625ff +size 8943 diff --git a/data/2025/2504_09xxx/2504.09710/images/2d84ad10a799a03d621f293c95c9751efb7f544fd948539b15f56498132f463c.jpg b/data/2025/2504_09xxx/2504.09710/images/2d84ad10a799a03d621f293c95c9751efb7f544fd948539b15f56498132f463c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..204838bb8cf7edb08b96d2696c0304f9a2388f7d --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09710/images/2d84ad10a799a03d621f293c95c9751efb7f544fd948539b15f56498132f463c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:723ecd6c1175d40b75e2eda7b92955dd8dbe13afa77caa35ad6b7145736d0640 +size 13149 diff --git a/data/2025/2504_09xxx/2504.09710/images/33dda47032ad4c964efebd5b8681521466c3fa0dc248e1d8fc4faf3f6cfeb8ab.jpg b/data/2025/2504_09xxx/2504.09710/images/33dda47032ad4c964efebd5b8681521466c3fa0dc248e1d8fc4faf3f6cfeb8ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4789fe1004135412b1aa6aa64e73d1bba68e73ff --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/33dda47032ad4c964efebd5b8681521466c3fa0dc248e1d8fc4faf3f6cfeb8ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51cdaa92d2ef90a5e08eb5c7d19a82e71536381354229fa78158d862e207afdf +size 10255 diff --git a/data/2025/2504_09xxx/2504.09710/images/3e862a18440592fab0f8c83e8df6b59f79b756e68b5338018918ce42306eedfc.jpg b/data/2025/2504_09xxx/2504.09710/images/3e862a18440592fab0f8c83e8df6b59f79b756e68b5338018918ce42306eedfc.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ba599ae91efabed780d4156aa3b141061a405dd8 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/3e862a18440592fab0f8c83e8df6b59f79b756e68b5338018918ce42306eedfc.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:93148ff42d7c257eb19ab8293c9e0a766fcbf31639b85f1debf1de811eb02cb7 +size 9562 diff --git a/data/2025/2504_09xxx/2504.09710/images/4829e0d465ea4f5e6dd0b5ae67f449549fb179cafe54cc73f5ec255995575a04.jpg b/data/2025/2504_09xxx/2504.09710/images/4829e0d465ea4f5e6dd0b5ae67f449549fb179cafe54cc73f5ec255995575a04.jpg new file mode 100644 index 0000000000000000000000000000000000000000..098d348fccb6ed0387e54d04214713dcb59ca8f3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/4829e0d465ea4f5e6dd0b5ae67f449549fb179cafe54cc73f5ec255995575a04.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df988ac32010665dfdaeffad8f3c7632a74ca2a37efd06324527c00cde54d6e5 +size 6996 diff --git a/data/2025/2504_09xxx/2504.09710/images/5e98d8f8024b264445999ca7a7e4f5e5107c60f782875c981cbcda40fc3bb2ed.jpg b/data/2025/2504_09xxx/2504.09710/images/5e98d8f8024b264445999ca7a7e4f5e5107c60f782875c981cbcda40fc3bb2ed.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f05104f465d2dc3242afc753b3db0d4a72f31ae7 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/5e98d8f8024b264445999ca7a7e4f5e5107c60f782875c981cbcda40fc3bb2ed.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:29b45e2885d59eefb7bccc7b5f2bf1016a21901f1528662b5d21800963962054 +size 7023 diff --git a/data/2025/2504_09xxx/2504.09710/images/625199cc1fa3533c68203d2c91a06e76acecc9cb32fe48d4eb476ca018029b80.jpg b/data/2025/2504_09xxx/2504.09710/images/625199cc1fa3533c68203d2c91a06e76acecc9cb32fe48d4eb476ca018029b80.jpg new file mode 100644 index 0000000000000000000000000000000000000000..892a2c35c1553052787c4dc5ac63f1c18e95d360 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/625199cc1fa3533c68203d2c91a06e76acecc9cb32fe48d4eb476ca018029b80.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b53cd559019dc5fe0aaa0446302b6a7d96a8887c2c4c99eab5ad340beb89fb36 +size 5694 diff --git a/data/2025/2504_09xxx/2504.09710/images/6cc6f36cb334ce3d8187d78cccfbbefe6196ca96877d726115347dfe7c271556.jpg 
b/data/2025/2504_09xxx/2504.09710/images/6cc6f36cb334ce3d8187d78cccfbbefe6196ca96877d726115347dfe7c271556.jpg new file mode 100644 index 0000000000000000000000000000000000000000..238542f2a3d59a671d63e21a3abc93e029160c9f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/6cc6f36cb334ce3d8187d78cccfbbefe6196ca96877d726115347dfe7c271556.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f8da8a9bd92f640bc8b20687414459534e0c70f7126139cd5a476052100e0571 +size 9476 diff --git a/data/2025/2504_09xxx/2504.09710/images/75809fff8a862bfb1cb12056d3e489ba26f74e510e9defb68d0e26568b6acb1d.jpg b/data/2025/2504_09xxx/2504.09710/images/75809fff8a862bfb1cb12056d3e489ba26f74e510e9defb68d0e26568b6acb1d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..50d1be87d05fe33c755ba1239dbc868ec724cd2d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/75809fff8a862bfb1cb12056d3e489ba26f74e510e9defb68d0e26568b6acb1d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:262ea631d1d7008dd2b98350bac6b3fc77d8c3da1c03a0f634cc690f9101362f +size 6478 diff --git a/data/2025/2504_09xxx/2504.09710/images/767d20fa7a31ff3d7e8d2f7693228a3d1141ed28620a51d6d9c94e599e75bd3c.jpg b/data/2025/2504_09xxx/2504.09710/images/767d20fa7a31ff3d7e8d2f7693228a3d1141ed28620a51d6d9c94e599e75bd3c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68d785ba4d093c98e8eac24feecd14a3fd7fa789 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/767d20fa7a31ff3d7e8d2f7693228a3d1141ed28620a51d6d9c94e599e75bd3c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c143a451c91b8656e1279b96b6ae8c118bf56879792385b917752f0f641d2ac8 +size 9688 diff --git a/data/2025/2504_09xxx/2504.09710/images/79f9c919c43934b19c402e03573f142657eebe1ce865728baa48e24d0c55be98.jpg b/data/2025/2504_09xxx/2504.09710/images/79f9c919c43934b19c402e03573f142657eebe1ce865728baa48e24d0c55be98.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f65f432feacd909e99e20e71510c00afd7015623 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/79f9c919c43934b19c402e03573f142657eebe1ce865728baa48e24d0c55be98.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7d833583e1c1591feb655c225c605497846b18fd0e7f17eba39678de50d494ff +size 2123 diff --git a/data/2025/2504_09xxx/2504.09710/images/7dba6855cb7a7d7cb4cefc1e6b8ce55ec45b710f83cbbc554a09682679d2d825.jpg b/data/2025/2504_09xxx/2504.09710/images/7dba6855cb7a7d7cb4cefc1e6b8ce55ec45b710f83cbbc554a09682679d2d825.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0096e5d6a9259b7cd0ab5c2360c09be06b174258 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/7dba6855cb7a7d7cb4cefc1e6b8ce55ec45b710f83cbbc554a09682679d2d825.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fc93b930fbb04a94caa42022297486fe9d8a92983ced16243aa5adb81ea53a36 +size 13056 diff --git a/data/2025/2504_09xxx/2504.09710/images/8e0df7b6628cce08d19fc414d2c8e28e73e39228bfd705f4e6fc8edb1552b07e.jpg b/data/2025/2504_09xxx/2504.09710/images/8e0df7b6628cce08d19fc414d2c8e28e73e39228bfd705f4e6fc8edb1552b07e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..402ed79cab7f58d12322dc6d28d4352946d4c19a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/8e0df7b6628cce08d19fc414d2c8e28e73e39228bfd705f4e6fc8edb1552b07e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:510aab38ddc4c08f7c46da8dc979fc82f6de7ac133071b30d00b7ed5e2d8ddc0 +size 9017 diff --git a/data/2025/2504_09xxx/2504.09710/images/90aa2351a348cc35cdc45b61d56ca9e75c94d18c9af73121a1b2516738e49c84.jpg b/data/2025/2504_09xxx/2504.09710/images/90aa2351a348cc35cdc45b61d56ca9e75c94d18c9af73121a1b2516738e49c84.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1223460a1788e3dec315c17da6e452920a335210 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/90aa2351a348cc35cdc45b61d56ca9e75c94d18c9af73121a1b2516738e49c84.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:446dc3b0914da0dabb45f50c7ba261178a587465d59935bcf84ec899bb860465 +size 12477 diff --git a/data/2025/2504_09xxx/2504.09710/images/93f81e3b516330cf41e06a4aff20afde3f874be24fd483e9ec177162a0b412f2.jpg b/data/2025/2504_09xxx/2504.09710/images/93f81e3b516330cf41e06a4aff20afde3f874be24fd483e9ec177162a0b412f2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..68b0b631d89aaf9d71d48fa6cf5803e6ed669662 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/93f81e3b516330cf41e06a4aff20afde3f874be24fd483e9ec177162a0b412f2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d7b1b6853e02ff3df605318f613b6e20a222a2b570abd78b8ee8bf08845f826e +size 11923 diff --git a/data/2025/2504_09xxx/2504.09710/images/9bfa221ac6381956061ca7ddf44381fc6c6d521915da4f96c930dfbeaff0ce55.jpg b/data/2025/2504_09xxx/2504.09710/images/9bfa221ac6381956061ca7ddf44381fc6c6d521915da4f96c930dfbeaff0ce55.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31121e0aafad97e4351bc04ce65c69b9fe77e378 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/9bfa221ac6381956061ca7ddf44381fc6c6d521915da4f96c930dfbeaff0ce55.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f5094e70ec6277f3e55a718de251bec0d840fc406fd2aefca7b53a24a3cb5617 +size 9221 diff --git a/data/2025/2504_09xxx/2504.09710/images/9e9ef5825d12f23ef4294c784c61776bb5627c910310930c8ca5388763eedfff.jpg b/data/2025/2504_09xxx/2504.09710/images/9e9ef5825d12f23ef4294c784c61776bb5627c910310930c8ca5388763eedfff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d7c10e05e5a20d8a1c119cc1c4d8663064c8c0a8 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/9e9ef5825d12f23ef4294c784c61776bb5627c910310930c8ca5388763eedfff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3496b920e3392ce2035826d22fa3eed3726c328bceec17dc2244cebd6e73e2f +size 13215 diff --git a/data/2025/2504_09xxx/2504.09710/images/aa3785a5149d03ddc5b6795803c83eba1155b634ceaac84b5d8b464165a3bb38.jpg b/data/2025/2504_09xxx/2504.09710/images/aa3785a5149d03ddc5b6795803c83eba1155b634ceaac84b5d8b464165a3bb38.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1682655c0092336f26db34eb2e0d637f68b979ad --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/aa3785a5149d03ddc5b6795803c83eba1155b634ceaac84b5d8b464165a3bb38.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:592d23c6e7b27058845ed0237dbf2ac84111c0d9a02728cdb20ff1358af92d6f +size 18156 diff --git a/data/2025/2504_09xxx/2504.09710/images/b42d5b9d03640e1b2d58067da1a5d77bcd6058d10d8acd393caffffa24d0c095.jpg b/data/2025/2504_09xxx/2504.09710/images/b42d5b9d03640e1b2d58067da1a5d77bcd6058d10d8acd393caffffa24d0c095.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3133a815a2d0edc770840ed8eacf252eb419ef8e --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09710/images/b42d5b9d03640e1b2d58067da1a5d77bcd6058d10d8acd393caffffa24d0c095.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:06705fdbfd1623ea44eeeabd0f427dec91c249ff3b69a5fd9be8e0513d7d6f4e +size 12902 diff --git a/data/2025/2504_09xxx/2504.09710/images/b441adacc37480df5405e8d06df557db3f332d10da975d85a6a2fec50e4bcdca.jpg b/data/2025/2504_09xxx/2504.09710/images/b441adacc37480df5405e8d06df557db3f332d10da975d85a6a2fec50e4bcdca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f36d60785ec004535e2bd14c8b578b310afaf333 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/b441adacc37480df5405e8d06df557db3f332d10da975d85a6a2fec50e4bcdca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c808b77efed3cd7b8a693d830fb48fc628bede3f054d1f16a4f85c01fdf5a577 +size 12832 diff --git a/data/2025/2504_09xxx/2504.09710/images/bd7a606c29b4217534d1180cc89eb34e1ab773d0c9d135da70642a335ccb9d53.jpg b/data/2025/2504_09xxx/2504.09710/images/bd7a606c29b4217534d1180cc89eb34e1ab773d0c9d135da70642a335ccb9d53.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6c077b7cda341b4e458bb2dae4db1b489643e3cb --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/bd7a606c29b4217534d1180cc89eb34e1ab773d0c9d135da70642a335ccb9d53.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c21c9113f24c11f9b301c7269aa12389dd6442ff05152b82f7b9eed313a626fc +size 9928 diff --git a/data/2025/2504_09xxx/2504.09710/images/c347b6d78546ea72564dfbc09bebe1fe6fe727ec0282cdd102864b3145b966a8.jpg b/data/2025/2504_09xxx/2504.09710/images/c347b6d78546ea72564dfbc09bebe1fe6fe727ec0282cdd102864b3145b966a8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..243c0564b4645c785e03c39bf8e9b603f23900a5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/c347b6d78546ea72564dfbc09bebe1fe6fe727ec0282cdd102864b3145b966a8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a3c91fdefbe4d5c65f71671a3c7ec87d23f7a80c7842e1ed6ec40a343588169b +size 12460 diff --git a/data/2025/2504_09xxx/2504.09710/images/c9c826488d46bd743a05c0379ad5bd3d7978b627c30c7548629b86defd59f338.jpg b/data/2025/2504_09xxx/2504.09710/images/c9c826488d46bd743a05c0379ad5bd3d7978b627c30c7548629b86defd59f338.jpg new file mode 100644 index 0000000000000000000000000000000000000000..da1786eee72879602980c5f1af4d56154935ac85 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/c9c826488d46bd743a05c0379ad5bd3d7978b627c30c7548629b86defd59f338.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1baf28d413156b9d8a5e97df6ab4b89e16c76f4b0722e86cee654aa726f3b5e +size 12662 diff --git a/data/2025/2504_09xxx/2504.09710/images/cf05bc8e173440ec3b5833dd318f2a494d32097e8722b17d7a6a65efc8535d9a.jpg b/data/2025/2504_09xxx/2504.09710/images/cf05bc8e173440ec3b5833dd318f2a494d32097e8722b17d7a6a65efc8535d9a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..026c37367f6800422e09a070e2e1c1d9f99c8d79 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/cf05bc8e173440ec3b5833dd318f2a494d32097e8722b17d7a6a65efc8535d9a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5236083c640fa2a27f41c864aede82bed85baf1b2294dcb459c5cd4782b73586 +size 7707 diff --git a/data/2025/2504_09xxx/2504.09710/images/d8f2b5ddc5aeb603604f13e42282b57951931d142166de91e5f5fae103b11cdf.jpg 
b/data/2025/2504_09xxx/2504.09710/images/d8f2b5ddc5aeb603604f13e42282b57951931d142166de91e5f5fae103b11cdf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96213b2a9f4fb74af75cb7e25648402399a9cbe3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/d8f2b5ddc5aeb603604f13e42282b57951931d142166de91e5f5fae103b11cdf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:33c5cef1b3e91b7fba59d23078cdce5729b712c2d47daef00be7ecfedb96814c +size 6068 diff --git a/data/2025/2504_09xxx/2504.09710/images/dbd799c502135303951605d7513f02860067f2b1e487881b6cb98b637df36867.jpg b/data/2025/2504_09xxx/2504.09710/images/dbd799c502135303951605d7513f02860067f2b1e487881b6cb98b637df36867.jpg new file mode 100644 index 0000000000000000000000000000000000000000..599a3de20face0a25da716f4bf0605fdb1df532a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/dbd799c502135303951605d7513f02860067f2b1e487881b6cb98b637df36867.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:91ab6660c6da5bfbe81be860eb2be5f5be40dbb33099e8b5e341907f1d7c079d +size 9296 diff --git a/data/2025/2504_09xxx/2504.09710/images/dd3c71bd214119d0d2c609783ec05f1acbc266fc90b30589cda8415312677dec.jpg b/data/2025/2504_09xxx/2504.09710/images/dd3c71bd214119d0d2c609783ec05f1acbc266fc90b30589cda8415312677dec.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aba93ac257693f40cf8c9c79c3f10a0e49703b25 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/dd3c71bd214119d0d2c609783ec05f1acbc266fc90b30589cda8415312677dec.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:27dc03e549e37b27825fae90642f816bf7ae4641b0db60ccca15daef286d8fbe +size 13298 diff --git a/data/2025/2504_09xxx/2504.09710/images/dd93f08e5f66a793480577380f8f65092e30dde1bd171cfaf897dc62534c9941.jpg b/data/2025/2504_09xxx/2504.09710/images/dd93f08e5f66a793480577380f8f65092e30dde1bd171cfaf897dc62534c9941.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b567050a13e2e21590fae826c084f426bd1bf6b5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/dd93f08e5f66a793480577380f8f65092e30dde1bd171cfaf897dc62534c9941.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e90b4c72bff64d939d77b10208a2e3a5a4baa438b8b0769013973b566ccd0317 +size 7745 diff --git a/data/2025/2504_09xxx/2504.09710/images/e28d82a879c78675b5c68c359b5de85522359e36fb32c3bf92a91e0ba286337c.jpg b/data/2025/2504_09xxx/2504.09710/images/e28d82a879c78675b5c68c359b5de85522359e36fb32c3bf92a91e0ba286337c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..090b1d5eb32668e9cecac6dc19d7d9edad0600b9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/e28d82a879c78675b5c68c359b5de85522359e36fb32c3bf92a91e0ba286337c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8e7b29e84c2f9cd6d86c398102aa616b31d4a6d49b28d013ba888aee6c45a306 +size 9251 diff --git a/data/2025/2504_09xxx/2504.09710/images/e4805948e0cca244ad9a13762138150a45a99b18afcb9bb2522dc2b2b6595d89.jpg b/data/2025/2504_09xxx/2504.09710/images/e4805948e0cca244ad9a13762138150a45a99b18afcb9bb2522dc2b2b6595d89.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ee26e3dc9b9e91d3db7d4a910087e2602396b4e5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/e4805948e0cca244ad9a13762138150a45a99b18afcb9bb2522dc2b2b6595d89.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d10f099eceebc914b01ac74f26885ec14480ad82796ade8fe01280eae0f36af2 +size 12669 diff --git a/data/2025/2504_09xxx/2504.09710/images/e6f0c0bb94a114b13ae70ae1245e9712158f506828d63f66fd321b5acaa082cd.jpg b/data/2025/2504_09xxx/2504.09710/images/e6f0c0bb94a114b13ae70ae1245e9712158f506828d63f66fd321b5acaa082cd.jpg new file mode 100644 index 0000000000000000000000000000000000000000..769c5cd7efaa40b9265f1c78763b057106a07f8b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/e6f0c0bb94a114b13ae70ae1245e9712158f506828d63f66fd321b5acaa082cd.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4c7153a03f4a7912327aca003fd9e4908e63b925837f7813eccb31d7d28fca6a +size 41574 diff --git a/data/2025/2504_09xxx/2504.09710/images/ed5ee9bc8d30f66370b8860be771fe2239d69990f22f96124e7b422658a99402.jpg b/data/2025/2504_09xxx/2504.09710/images/ed5ee9bc8d30f66370b8860be771fe2239d69990f22f96124e7b422658a99402.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5dba6d1fd1251edf66913b0da7c67751f4235164 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/ed5ee9bc8d30f66370b8860be771fe2239d69990f22f96124e7b422658a99402.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:480fb67212b3581b8146f3a7441d3dd2b2cb8b0460e9706066789d615b6a99a9 +size 9806 diff --git a/data/2025/2504_09xxx/2504.09710/images/ed7c1f9d1fcdd8ad872637f61821fac0d86b9eb068d8a228df80f01f4ae8e37b.jpg b/data/2025/2504_09xxx/2504.09710/images/ed7c1f9d1fcdd8ad872637f61821fac0d86b9eb068d8a228df80f01f4ae8e37b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..96554d9881a6867d3bb78a9cbd343746d002a083 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/ed7c1f9d1fcdd8ad872637f61821fac0d86b9eb068d8a228df80f01f4ae8e37b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bf56f7e9ea258e51093c621aa31d9c3e23badc221752f141f39328854e7f3540 +size 845 diff --git a/data/2025/2504_09xxx/2504.09710/images/f410bc2ba3a4e44d2bc0ad93a48a6847bf24a13c070787cb53ce0a6504aa2275.jpg b/data/2025/2504_09xxx/2504.09710/images/f410bc2ba3a4e44d2bc0ad93a48a6847bf24a13c070787cb53ce0a6504aa2275.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb65182a4328f231da07ca9b7ec6ff9af798d91e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/f410bc2ba3a4e44d2bc0ad93a48a6847bf24a13c070787cb53ce0a6504aa2275.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d532c6306aa4f32a1c5d2c5cc91f1123836b61bb196355a45ee93c3938fe8c7e +size 10304 diff --git a/data/2025/2504_09xxx/2504.09710/images/f5604af7c486c18eb458a78ea908adaa86443af1006735c38828548bf95da459.jpg b/data/2025/2504_09xxx/2504.09710/images/f5604af7c486c18eb458a78ea908adaa86443af1006735c38828548bf95da459.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cf6268290b6da4cf2754ba01cf271d2c5d06c11f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/images/f5604af7c486c18eb458a78ea908adaa86443af1006735c38828548bf95da459.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60d2c2e89883a425c3e29e894551554ca6c0fec663300c8ba0e51be942670cd1 +size 16108 diff --git a/data/2025/2504_09xxx/2504.09710/images/fa48ca1b252482ec6ec16d8ea638822a6fb14e9c29d445c5b397181341cfa44e.jpg b/data/2025/2504_09xxx/2504.09710/images/fa48ca1b252482ec6ec16d8ea638822a6fb14e9c29d445c5b397181341cfa44e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1144c64289ece8af61dc668885cbe1cfe2735106 --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09710/images/fa48ca1b252482ec6ec16d8ea638822a6fb14e9c29d445c5b397181341cfa44e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b0514297e3539193f396e249a75c75cd2ce22c75a3adbb87c7717dcb385b4ed8 +size 7895 diff --git a/data/2025/2504_09xxx/2504.09710/layout.json b/data/2025/2504_09xxx/2504.09710/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..f95a3af8f404e4d41a19cd7453910b14802d47e7 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09710/layout.json @@ -0,0 +1,11299 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 115, + 97, + 496, + 138 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 97, + 496, + 138 + ], + "spans": [ + { + "bbox": [ + 115, + 97, + 496, + 138 + ], + "type": "text", + "content": "DUMP: Automated Distribution-Level Curriculum Learning for RL-based LLM Post-training" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 118, + 178, + 190, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 178, + 190, + 191 + ], + "spans": [ + { + "bbox": [ + 118, + 178, + 190, + 191 + ], + "type": "text", + "content": "Zhenting Wang" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 207, + 178, + 268, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 178, + 268, + 191 + ], + "spans": [ + { + "bbox": [ + 207, + 178, + 268, + 191 + ], + "type": "text", + "content": "Guofeng Cui" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 285, + 178, + 336, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 285, + 178, + 336, + 190 + ], + "spans": [ + { + "bbox": [ + 285, + 178, + 336, + 190 + ], + "type": "text", + "content": "Yu-Jhe Li" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 352, + 178, + 403, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 352, + 178, + 403, + 190 + ], + "spans": [ + { + "bbox": [ + 352, + 178, + 403, + 190 + ], + "type": "text", + "content": "Kun Wan\\*" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 421, + 178, + 491, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 421, + 178, + 491, + 190 + ], + "spans": [ + { + "bbox": [ + 421, + 178, + 491, + 190 + ], + "type": "text", + "content": "Wentian Zhao\\*" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 235, + 206, + 375, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 235, + 206, + 375, + 219 + ], + "spans": [ + { + "bbox": [ + 235, + 206, + 375, + 219 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 235, + 206, + 375, + 219 + ], + "type": "text", + "content": "Rutgers University " + }, + { + "bbox": [ + 235, + 206, + 375, + 219 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 235, + 206, + 375, + 219 + ], + "type": "text", + "content": "Adobe Inc." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 281, + 247, + 329, + 260 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 281, + 247, + 329, + 260 + ], + "spans": [ + { + "bbox": [ + 281, + 247, + 329, + 260 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 140, + 271, + 470, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 271, + 470, + 513 + ], + "spans": [ + { + "bbox": [ + 140, + 271, + 470, + 513 + ], + "type": "text", + "content": "Recent advances in reinforcement learning (RL)-based post-training have led to notable improvements in large language models (LLMs), particularly in enhancing their reasoning capabilities to handle complex tasks. However, most existing methods treat the training data as a unified whole, overlooking the fact that modern LLM training often involves a mixture of data from diverse distributions—varying in both source and difficulty. This heterogeneity introduces a key challenge: how to adaptively schedule training across distributions to optimize learning efficiency. In this paper, we present a principled curriculum learning framework grounded in the notion of distribution-level learnability. Our core insight is that the magnitude of policy advantages reflects how much a model can still benefit from further training on a given distribution. Based on this, we propose a distribution-level curriculum learning framework for RL-based LLM post-training, which leverages the Upper Confidence Bound (UCB) principle to dynamically adjust sampling probabilities for different distributions. This approach prioritizes distributions with either high average advantage (exploitation) or low sample count (exploration), yielding an adaptive and theoretically grounded training schedule. We instantiate our curriculum learning framework with GRPO as the underlying RL algorithm and demonstrate its effectiveness on logic reasoning datasets with multiple difficulties and sources. Our experiments show that our framework significantly improves convergence speed and final performance, highlighting the value of distribution-aware curriculum strategies in LLM post-training. Code: https://github.com/ZhentingWang/DUMP." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 105, + 532, + 192, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 532, + 192, + 544 + ], + "spans": [ + { + "bbox": [ + 105, + 532, + 192, + 544 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 555, + 506, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 555, + 506, + 634 + ], + "spans": [ + { + "bbox": [ + 104, + 555, + 506, + 634 + ], + "type": "text", + "content": "Reinforcement learning (RL)-based post-training has emerged as a powerful approach for enhancing the capabilities of large language models (LLMs), particularly in areas requiring structured reasoning, multi-step inference, and task-specific generalization [1-4]. By leveraging reward signals derived from task performance, human feedback, or domain-specific metrics, RL provides a flexible alternative to supervised fine-tuning. Unlike imitation-based methods that merely mimic reference outputs, RL-based approaches allow models to optimize directly toward behavioral objectives, making them especially effective for boosting model performance on complex reasoning and agentic tasks." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 637, + 506, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 637, + 506, + 717 + ], + "spans": [ + { + "bbox": [ + 104, + 637, + 506, + 717 + ], + "type": "text", + "content": "While RL-based post-training has become a key technique for enhancing LLM capabilities in reasoning, alignment, and coding, one foundational challenge remains underexplored: how to dynamically schedule training across heterogeneous data distributions. In practice, LLMs are post-trained on datasets drawn from a wide variety of sources—ranging from factual QA to math problems and coding tasks—each differing in knowledge/capability relevance, and learning difficulty [5-7]. This heterogeneity is evident in large-scale post-training datasets such as Tulu 3 [7], where prompts span general dialogue, logic puzzles, STEM problems, and multilingual instructions, with" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 220, + 35, + 568 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 220, + 35, + 568 + ], + "spans": [ + { + "bbox": [ + 14, + 220, + 35, + 568 + ], + "type": "text", + "content": "arXiv:2504.09710v3 [cs.LG] 11 Oct 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 731, + 139, + 742 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 731, + 139, + 742 + ], + "spans": [ + { + "bbox": [ + 105, + 731, + 139, + 742 + ], + "type": "text", + "content": "Preprint." + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 193 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 193 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 193 + ], + "type": "text", + "content": "widely varying counts, formats, and alignment objectives. More recently, next-generation post-training pipelines (e.g., Seed-Thinking v1.5 [8]) have shifted toward synthetic data generation with controllable parameters—e.g., configuring logical puzzle difficulty. This allows fine-grained control over the data distribution, making distribution-level curriculum learning both feasible and increasingly important. Despite this, most RL-based pipelines still treat all data distributions equally—uniformly sampling tasks throughout training or relying on static, hand-designed curricula. This static treatment ignores the model's evolving learning needs and underutilizes the training budget. Moreover, it is difficult to handcraft effective curricula when the post-training data comes from multiple distributions lacking clear difficulty labels. As reinforcement learning becomes increasingly used in post-training and training costs continue to rise, a data-driven curriculum mechanism that dynamically prioritizes learnable distributions is not just desirable, but necessary." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 198, + 506, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 198, + 506, + 276 + ], + "spans": [ + { + "bbox": [ + 104, + 198, + 506, + 276 + ], + "type": "text", + "content": "This motivates the need for automated distribution-level curriculum learning: a dynamic strategy that adjusts sampling probabilities across data distributions throughout training. 
While prior work has explored instance-level curricula based on sample difficulty [9], and static/heuristic multi-stage schedules have been applied in LLM post-training [10, 11], little attention has been paid to automated, distribution-level scheduling—especially in the context of RL for capability-oriented post-training. The central challenge lies in identifying signals that reflect the current learnability of each distribution and in designing algorithms that can stably and efficiently leverage these signals to guide sampling." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 280, + 506, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 280, + 506, + 488 + ], + "spans": [ + { + "bbox": [ + 104, + 280, + 506, + 488 + ], + "type": "text", + "content": "In this paper, we present DUMP (Automated Distribution-level cUrriculumM learning for RL-based LLM Post-training), a simple but theoretically grounded approach to address this challenge. Our central insight is that the magnitude of policy advantages—the expected absolute difference between a model's predicted return and its baseline value—serves as a natural proxy for distribution-level learnability. High advantages on specific data distribution indicate underfitting and high potential for improvement on it, while low advantages suggest diminishing returns. Moreover, the statistical reliability of these advantage estimates improves with the number of samples drawn from each distribution. DUMP operationalizes this insight by using bandit-style Upper Confidence Bound (UCB) scores to schedule distribution sampling. It maintains a sliding window of recent advantage magnitudes for each distribution and computes a score that balances exploitation (high advantage) and exploration (low visitation). These scores are normalized via a softmax to form sampling weights, which are then used to generate training batches. Unlike fixed or heuristic curricula, DUMP adapts throughout training based on empirical signals, and can be seamlessly integrated into standard LLM RL pipelines. We instantiate DUMP with GRPO [3], but the method is compatible with any advantage-based RL algorithm. We evaluate DUMP on logic reasoning corpora. Our experiments show that DUMP significantly accelerates convergence and yields stronger performance compared to uniform sampling. Furthermore, we provide theoretical analysis that supports the use of absolute advantages as a surrogate for distribution-level learnability, formalizing its connection to sample efficiency and regret minimization." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 493, + 507, + 559 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 493, + 507, + 559 + ], + "spans": [ + { + "bbox": [ + 104, + 493, + 507, + 559 + ], + "type": "text", + "content": "We summarize our contributions as follows. ① We highlight the underexplored challenge of curriculum learning at the distribution level for RL-based post-training aimed at capability enhancement. ② We propose DUMP, a theoretically grounded framework that leverages advantage-based UCB scores to adaptively guide training over data distributions. ③ We demonstrate DUMP's effectiveness through empirical results and theoretical analysis, showing that it enables faster, more efficient improvement on LLM capabilities." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 571, + 190, + 583 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 571, + 190, + 583 + ], + "spans": [ + { + "bbox": [ + 105, + 571, + 190, + 583 + ], + "type": "text", + "content": "2 Background" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 590, + 507, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 590, + 507, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 590, + 507, + 723 + ], + "type": "text", + "content": "RL-based LLM Post-training. Reinforcement learning (RL) plays a central role in post-training large language models (LLMs), especially for tasks involving reasoning, subjective preference, or long-horizon control. The RLHF framework [1, 12-15] laid the foundation by aligning models using reward signals derived from human preferences. Beyond preference alignment, recent RL-based post-training approaches have notably enhanced LLMs' capabilities in complex reasoning tasks, particularly coding and mathematics. For instance, RL post-trained model OpenAI o1 [16], o3 [17, 18], DeepSeek-R1 [4] significantly outperform LLMs without RL post-training such as pre-trained versions of GPT-4o [19] and DeepSeek-V3 [20] on challenging mathematics and coding benchmarks (e.g., AIME [21] and Codeforces [22]). Proximal Policy Optimization (PPO) [23] is widely used in post-training due to its clipped objective, which stabilizes training by preventing large policy updates. PPO remains a strong baseline in many LLM alignment settings. Direct Preference Optimization (DPO) [2] simplifies the pipeline by replacing RL rollouts with a classification-style loss" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": "derived from a KL-constrained reward maximization objective. While DPO works well on pairwise preference data, it does not naturally support group-wise or comparative feedback. Group Relative Policy Optimization (GRPO) [3] addresses this limitation by leveraging group-based feedback. 
For each input prompt " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ", GRPO samples a group of " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "G" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": " candidate outputs " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "\\{o_1, \\ldots, o_G\\} \\sim \\pi_{\\mathrm{ref}}(\\cdot | x)" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": " from a frozen reference policy " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{ref}}" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ". Each output " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": " is assigned a reward " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": ", and the advantage of " + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 72, + 504, + 139 + ], + "type": "text", + "content": " is computed by normalizing its reward relative to others in the group:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 237, + 146, + 505, + 172 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 237, + 146, + 505, + 172 + ], + "spans": [ + { + "bbox": [ + 237, + 146, + 505, + 172 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {i} = \\frac {r _ {i} - \\operatorname {m e a n} \\left(\\left\\{r _ {1} , \\dots , r _ {G} \\right\\}\\right)}{\\operatorname {s t d} \\left(\\left\\{r _ {1} , \\dots , r _ {G} \\right\\}\\right) + \\epsilon}, \\tag {1}", + "image_path": "4829e0d465ea4f5e6dd0b5ae67f449549fb179cafe54cc73f5ec255995575a04.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 178, + 504, + 211 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 178, + 504, + 211 + ], + "spans": [ + { + "bbox": [ + 104, + 178, + 504, + 211 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 178, + 504, + 211 + ], + "type": "inline_equation", + "content": "\\epsilon > 0" + }, + { + "bbox": [ + 104, + 178, + 504, + 211 + ], + "type": "text", + "content": " is a small constant for numerical stability. These normalized advantages capture the relative quality of outputs within the group. 
The model policy " + }, + { + "bbox": [ + 104, + 178, + 504, + 211 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 178, + 504, + 211 + ], + "type": "text", + "content": " is then updated by maximizing the following clipped surrogate objective:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 217, + 504, + 256 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 217, + 504, + 256 + ], + "spans": [ + { + "bbox": [ + 106, + 217, + 504, + 256 + ], + "type": "interline_equation", + "content": "\\mathcal {J} _ {\\mathrm {G R P O}} (\\theta) = \\mathbb {E} _ {x, \\left\\{o _ {i} \\right\\}} \\left[ \\frac {1}{G} \\sum_ {i = 1} ^ {G} \\min \\left(\\frac {\\pi_ {\\theta} \\left(o _ {i} \\mid x\\right)}{\\pi_ {\\mathrm {o l d}} \\left(o _ {i} \\mid x\\right)} \\hat {A} _ {i}, \\operatorname {c l i p} \\left(\\frac {\\pi_ {\\theta} \\left(o _ {i} \\mid x\\right)}{\\pi_ {\\mathrm {o l d}} \\left(o _ {i} \\mid x\\right)}, 1 - \\epsilon , 1 + \\epsilon\\right) \\hat {A} _ {i}\\right) - \\beta \\mathbb {D} _ {\\mathrm {K L}} \\left(\\pi_ {\\theta} \\| \\pi_ {\\text {r e f}}\\right) \\right], \\tag {2}", + "image_path": "f5604af7c486c18eb458a78ea908adaa86443af1006735c38828548bf95da459.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "spans": [ + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}(o_i|x)" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": " is the probability assigned by the current model to output " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{old}}(o_i|x)" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": " is the same under the model from previous step, and " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{ref}}(o_i|x)" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": " is that under the reference model. The first term inside the summation is a clipped policy ratio scaled by " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\hat{A}_i" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": ", similar to PPO [23], which prevents overly large updates. The outer expectation is taken over prompts " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": " and their sampled output groups " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\{o_i\\}" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": ". 
The second term is a KL divergence penalty that regularizes the updated policy " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": " to stay close to " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\pi_{\\mathrm{ref}}" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": ", weighted by a hyperparameter " + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 104, + 256, + 504, + 335 + ], + "type": "text", + "content": ". This formulation eliminates the need for an explicit value baseline and stabilizes training by comparing outputs within local groups." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 340, + 506, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 340, + 506, + 505 + ], + "spans": [ + { + "bbox": [ + 104, + 340, + 506, + 505 + ], + "type": "text", + "content": "Curriculum Learning for RL. Curriculum learning [24, 25] organizes training by progressing from easy to hard examples. In RL, curricula often follow task complexity [26-28], or are learned via teacher-student frameworks modeled as partially observable Markov decision process [29, 30]. With the adoption of RL in LLM post-training, curriculum learning has shown potential for improving both training efficiency and model effectiveness. For example, Curri-DPO [9] constructs instance-level curricula by ranking preference pairs based on the score gap between preferred and dispreferred responses, introducing harder pairs gradually during DPO fine-tuning. Kimi k1.5 [10] and LogicRL [11], on the other hand, use manually defined heuristic curricula with fixed training stages, e.g., models are first trained on \"easy\" samples for a pre-specified number of steps, then switched to \"hard\" samples. These strategies rely on static schedules and heuristic difficulty labels, without adapting to the model's learning progress. While these works demonstrate the benefit of curriculum learning in LLM post-training, most existing approaches focus on instance-level difficulty or use static, manually designed strategies. In contrast, automatic curriculum learning at the distribution level, especially in RL-based post-training, remains underexplored. In this paper, we propose DUMP to fill this gap by adaptively scheduling training over distributions using advantage-based learnability signals." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 516, + 167, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 516, + 167, + 528 + ], + "spans": [ + { + "bbox": [ + 105, + 516, + 167, + 528 + ], + "type": "text", + "content": "3 Method" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 536, + 506, + 581 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 506, + 581 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 506, + 581 + ], + "type": "text", + "content": "In this section, we introduce DUMP, a distribution-level curriculum learning framework for RL-based LLM post-training. We first introduce expected absolute advantage as a proxy for learnability, and formalize the scheduling problem as a multi-armed bandit. We then describe a UCB-based strategy to guide distribution selection, followed by the full implementation of DUMP." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 105, + 592, + 336, + 604 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 592, + 336, + 604 + ], + "spans": [ + { + "bbox": [ + 105, + 592, + 336, + 604 + ], + "type": "text", + "content": "3.1 Measuring Learnability via Absolute Advantage" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 654 + ], + "type": "text", + "content": "We aim to dynamically assess the usefulness of different data distributions during LLM reinforcement learning post-training. Intuitively, a distribution is more useful (or \"learnable\") if the model can gain more from training on its samples. To help understand and measure the learnability of the data samples from different distributions, we provide the following theorem:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": "Theorem 3.1 (Expected Advantage Magnitude Reflects Learnability). Given a policy " + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": " and a data distribution " + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": ", the expected absolute advantage " + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{x \\sim d} \\left[ \\mathbb{E}_{o_i \\sim \\pi_{\\theta}(\\cdot | x)} \\left[ |\\hat{A}_i| \\right] \\right]" + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": " serves as a proxy for how much that distribution " + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": " can help the model improve, where the distribution " + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": " consisting of prompts " + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "inline_equation", + "content": "x \\sim d" + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": ", each prompt has a group of sampled outputs " + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\{o_1, \\ldots, o_n\\}" + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\hat{A}_i" + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": " denotes the advantage of output " + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 658, + 506, + 723 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 506, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 506, + 226 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 506, + 226 + ], + "type": "text", + "content": "The proof can be found in Appendix A. Intuitively, if training on a distribution results in a larger expected advantage magnitude, then that distribution is considered more learnable. The advantage function measures the deviation between an action's predicted value and its actual return; a large advantage—either positive or negative—indicates that the model's current policy is still far from optimal on those samples but has a large potential to improve. A small advantage magnitude does not necessarily imply mastery—it may also occur when a task is too difficult or noisy for the model to learn from effectively, resulting in weak or unstable learning signals. To capture this deviation in both directions, we take the absolute value of the advantage. Without this, positive and negative advantages within a batch may cancel out, masking the true extent of the model's uncertainty or suboptimality. By averaging the absolute advantage over multiple sampled outputs and prompts, we obtain a robust estimate of how much learning signal remains in a given distribution. This expected absolute advantage thus acts as a practical proxy for distribution-level learnability: it reflects how much the model can benefit from training on that distribution. It also has the strength of being lightweight to compute in RL pipelines, as advantage estimates are already generated during rollout." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 236, + 458, + 247 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 236, + 458, + 247 + ], + "spans": [ + { + "bbox": [ + 104, + 236, + 458, + 247 + ], + "type": "text", + "content": "3.2 Formalizing Distribution-Level Curriculum Learning as Multi-armed Bandit" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 253, + 504, + 319 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 253, + 504, + 319 + ], + "spans": [ + { + "bbox": [ + 104, + 253, + 504, + 319 + ], + "type": "text", + "content": "We aim to design a curriculum learning strategy that dynamically allocates training focus across multiple data distributions to maximize overall model improvement. Let " + }, + { + "bbox": [ + 104, + 253, + 504, + 319 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{d_1, \\dots, d_N\\}" + }, + { + "bbox": [ + 104, + 253, + 504, + 319 + ], + "type": "text", + "content": " be a set of data distributions. 
At each training step, we sample a batch of examples " + }, + { + "bbox": [ + 104, + 253, + 504, + 319 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_t" + }, + { + "bbox": [ + 104, + 253, + 504, + 319 + ], + "type": "text", + "content": " by drawing prompts from these distributions according to a learnable sampling policy, and use the batch to update model parameters " + }, + { + "bbox": [ + 104, + 253, + 504, + 319 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 253, + 504, + 319 + ], + "type": "text", + "content": " via reinforcement learning. The goal is to assign higher sampling probabilities to distributions that offer greater learning potential, thereby maximizing cumulative capability gain." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "spans": [ + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": "As motivated in Theorem 3.1, we quantify the learning potential of a distribution " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": " via its expected absolute advantage, defined as " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "L(d) = \\mathbb{E}_{x\\sim d}\\left[\\mathbb{E}_{o\\sim \\pi_{\\theta}(\\cdot |x)}\\left[\\left|\\hat{A}(o)\\right|\\right]\\right]" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": ". Our objective is to dynamically adjust the sampling distribution over " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": " such that, over the training horizon " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": ", we approximately maximize the total expected learnability gain " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "\\sum_{t=1}^{T}\\mathbb{E}_{d\\sim P_t}[L(d)]" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "P_t" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": " is the sampling distribution at step " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": ". This setup resembles a multi-armed bandit (MAB) problem, where each distribution acts as an arm and its reward corresponds to its learnability. In this setting, the central challenge is to estimate and balance each distribution's potential: exploiting those with high observed advantage while still exploring under-sampled ones that may offer long-term benefit. To this end, we adopt the classic Upper Confidence Bound (UCB) principle [31], which provides theoretical guarantees for balancing exploration and exploitation in bandit problems. 
Specifically, UCB-based algorithms achieve sublinear regret compared to the optimal fixed-arm strategy, and we show in Appendix B that applying UCB on empirical advantage statistics yields a near-optimal schedule under mild assumptions. To allow smoother allocation of sampling probabilities without hard cutoffs and reducing variance in learning, we adopt a soft-selection mechanism: instead of choosing one distribution at each step, we compute a UCB score for every distribution and normalize the scores with a softmax function to obtain a sampling distribution. This soft-selection formulation preserves the spirit of UCB—higher scoring distributions are sampled more—but enables partial exploration of all arms, and it is easier to integrate into LLM training pipelines. The resulting sampling distribution provides a convex mixture over data sources, where each distribution " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "d_j" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": " is selected with probability. Each training batch is then composed by drawing examples from multiple distributions in proportion to their scores. To estimate learnability in practice, we maintain a sliding window " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{d_j}^w" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": " of recent absolute advantages for each distribution " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "d_j" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": ", and define its empirical reward as the mean absolute advantage: " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "\\hat{L}(d_j) = \\frac{1}{|\\mathcal{A}_{d_j}^w|}\\sum_{a\\in \\mathcal{A}_{d_j}^w}|a|" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": ". We also track the total number of samples drawn from each distribution " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "n_{d_j}" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": ", and the global sample count " + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "inline_equation", + "content": "n_{\\mathrm{total}} = \\sum_{j}n_{d_j}" + }, + { + "bbox": [ + 104, + 324, + 506, + 608 + ], + "type": "text", + "content": ". 
The UCB score for each distribution is:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 221, + 613, + 504, + 644 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 221, + 613, + 504, + 644 + ], + "spans": [ + { + "bbox": [ + 221, + 613, + 504, + 644 + ], + "type": "interline_equation", + "content": "\\mathrm {U C B} \\left(d _ {j}\\right) = \\hat {L} \\left(d _ {j}\\right) + \\sqrt {\\frac {2 \\log \\left(n _ {\\text {t o t a l}} + 1\\right)}{n _ {d _ {j}} + 1}} \\tag {3}", + "image_path": "dd93f08e5f66a793480577380f8f65092e30dde1bd171cfaf897dc62534c9941.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "spans": [ + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "text", + "content": "The first term encourages exploitation of distributions with high observed advantages, while the second term ensures sufficient exploration of rarely sampled distributions. To obtain the final sampling weights, we apply a softmax over the UCB scores. Specifically, the probability of selecting distribution " + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "inline_equation", + "content": "d_{j}" + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "text", + "content": " is computed as: " + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "inline_equation", + "content": "P(d_{j}) = \\frac{\\exp(\\mathrm{UCB}(d_{j}) / \\tau)}{\\sum_{j=1}^{N} \\exp(\\mathrm{UCB}(d_{j}) / \\tau)}" + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "inline_equation", + "content": "\\tau > 0" + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "text", + "content": " is a temperature hyperparameter that controls the sharpness of the sampling distribution. A lower " + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "text", + "content": " results in more peaked selection around the top-scoring distributions, while a higher " + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 649, + 504, + 722 + ], + "type": "text", + "content": " leads to a smoother, more exploratory curriculum. 
This" + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 106, + 85, + 505, + 412 + ], + "blocks": [ + { + "bbox": [ + 106, + 72, + 453, + 84 + ], + "lines": [ + { + "bbox": [ + 106, + 72, + 453, + 84 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 453, + 84 + ], + "type": "text", + "content": "Algorithm 1 Automated Distribution-Level Curriculum Learning with UCB Sampling" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "lines": [ + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "spans": [ + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": "Input: Dataset " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{d_1,\\dots ,d_N\\}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " ; pre-trained model parameters " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " \nOutput: Post-trained model parameters " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " \n1: function DUMP(D, " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " \n2: Initialize distribution-level statistics \n3: for each " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "d_{j}\\in \\mathcal{D}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " do \n4: " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "A_{dj}^{w}\\gets []" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " Sliding window for absolute advantages \n5: " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "n_{d_j}\\gets 0" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " Total samples seen from " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "d_{j}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " \n6: " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "P(d_{j})\\leftarrow \\frac{1}{N}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " Equal initial weights \n7: for training step " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "t = 1,2,\\ldots ,T" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " do \n8: Sample batch " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_t" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + 
"type": "text", + "content": " from " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " according to " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "P(d_j)" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " \n9: Compute advantages " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\hat{A} (o)" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " for all " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "o\\in \\mathcal{B}_t" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " via model rollout \n10: for each " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "d_{j}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " with samples in " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_t" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " do \n11: " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "n_{d_j}\\gets n_{d_j} + |\\mathcal{B}_{t,d_j}|" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " Update sample count; " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_{t,d_j}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " : subset of batch from " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "d_j" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " \n12: " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{dj}^{w}\\gets \\mathcal{A}_{dj}^{w}\\cup \\{\\left|\\hat{A} (o)\\right|\\mid x\\in \\mathcal{B}_{t,d_j},o\\sim \\pi_{\\theta}(\\cdot |x)\\}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " Append new advantages from " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "d_j" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " \n13: " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "A_{dj}^{w}\\gets A_{dj}^{w}[-k:]" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " k: Window Size; Keep last " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " elements \n14: Compute UCB scores for each distribution \n15: " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "n_{\\mathrm{total}}\\gets \\sum_{d_j\\in \\mathcal{D}}n_{d_j}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " \n16: for each " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "d_{j}\\in \\mathcal{D}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " do \n17: " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + 
], + "type": "inline_equation", + "content": "\\hat{L} (d_j)\\gets \\frac{1}{|\\mathcal{A}_{dj}^w|}\\sum_{a\\in \\mathcal{A}_{dj}^w}a" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " Mean of absolute advantages \n18: UCB " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "(d_j)\\gets \\hat{L} (d_j) + \\sqrt{\\frac{2\\log(n_{\\mathrm{total}} + 1)}{n_{d_j} + 1}}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " Eq.3 \n19: Update sampling distribution \n20: " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "P(d_j)\\gets \\frac{\\exp(UCB(d_j) / \\tau)}{\\sum_{j = 1}^{N}\\exp(UCB(d_j) / \\tau)}\\quad \\forall d_j\\in \\mathcal{D}" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " △: temperature \n21: Update " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " using " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_t" + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "text", + "content": " with an RL algorithm (e.g., GRPO) \n22: return " + }, + { + "bbox": [ + 106, + 85, + 505, + 412 + ], + "type": "inline_equation", + "content": "\\theta" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "algorithm" + }, + { + "bbox": [ + 104, + 434, + 504, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 434, + 504, + 479 + ], + "spans": [ + { + "bbox": [ + 104, + 434, + 504, + 479 + ], + "type": "text", + "content": "bandit-based formulation provides a lightweight, adaptive, and reward-sensitive curriculum learning mechanism. It balances the need to focus on learnable distributions while avoiding premature neglect of underexplored ones. In the next section, we present the complete algorithmic implementation of DUMP, including its integration with rollout procedures and online statistics tracking." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 487, + 175, + 498 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 487, + 175, + 498 + ], + "spans": [ + { + "bbox": [ + 105, + 487, + 175, + 498 + ], + "type": "text", + "content": "3.3 Algorithm" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": "The detailed curriculum learning procedure is illustrated in Algorithm 1. The algorithm takes as input a dataset " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{d_1, \\ldots, d_N\\}" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " composed of multiple distributions and returns the optimized model parameters " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " through a reinforcement learning loop. 
In lines 3-6, we initialize per-distribution statistics: each distribution " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "d_j \\in \\mathcal{D}" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " is associated with an empty sliding window " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{d_j}^w" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " to store recent absolute advantages, a counter " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "n_{d_j}" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " for tracking the number of samples drawn from " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "d_j" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": ", and an initial sampling probability " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "P(d_j) = \\frac{1}{N}" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " indicating uniform sampling. At each training step " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " (line 8), a batch " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_t" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " is sampled according to the current distribution weights " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "P(d_j)" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": ". Advantages " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\hat{A}(o)" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " are then computed via model rollouts for each sampled output " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "o \\in \\mathcal{B}_t" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " (line 9). For every distribution " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "d_j" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " that contributes samples in the current batch, we update its sample count " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "n_{d_j}" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " (line 11), append the corresponding advantages to its sliding window " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{d_j}^w" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " (line 12), and truncate the window to retain only the most recent " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " entries (300 by default) in line 13. 
This ensures that our estimate of per-distribution learnability remains up-to-date and robust to noise. In lines 15-18, we compute the Upper Confidence Bound (UCB) score " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathrm{UCB}(d_j)" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " for each distribution. The score consists of two terms: the empirical mean absolute advantage " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\hat{L}(d_j)" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " over the sliding window " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\mathcal{A}_{d_j}^w" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": ", and an exploration bonus inversely proportional to the square root of the number of samples " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "n_{d_j}" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": ". This balances prioritization of distributions that are either highly learnable or underexplored. In line 20, the sampling probabilities " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "P(d_j)" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " are updated by applying a softmax over the UCB scores with a temperature parameter " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " (0.1 by default). Lower values of " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " result in sharper distributions that concentrate more heavily on top-ranked distributions, while higher " + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 502, + 506, + 723 + ], + "type": "text", + "content": " values induce a smoother, more exploratory curriculum. Finally, in line" + } + ] + } + ], + "index": 4 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "spans": [ + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": "21, the model parameters " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": " are updated using the current batch " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\mathcal{B}_t" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": " with a reinforcement learning algorithm such as GRPO. 
After " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": " steps, the algorithm returns the post-trained model " + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "inline_equation", + "content": "\\theta" + }, + { + "bbox": [ + 104, + 72, + 504, + 106 + ], + "type": "text", + "content": ", which has been adaptively guided to learn from the most informative distributions." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 105, + 117, + 254, + 131 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 117, + 254, + 131 + ], + "spans": [ + { + "bbox": [ + 105, + 117, + 254, + 131 + ], + "type": "text", + "content": "4 Experiments and Results" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 136, + 506, + 181 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 136, + 506, + 181 + ], + "spans": [ + { + "bbox": [ + 104, + 136, + 506, + 181 + ], + "type": "text", + "content": "In this section, we first introduce our experiments setup including used models datasets and more implementation details. We then demonstrate the results for the effectiveness of our method DUMP. More discussion about the comparison to static heuristic curriculum [11, 10] can be found in Appendix C." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 105, + 191, + 212, + 204 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 191, + 212, + 204 + ], + "spans": [ + { + "bbox": [ + 105, + 191, + 212, + 204 + ], + "type": "text", + "content": "4.1 Experiments Setup" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 209, + 507, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 209, + 507, + 243 + ], + "spans": [ + { + "bbox": [ + 104, + 209, + 507, + 243 + ], + "type": "text", + "content": "RL Algorithm and LLM Models. We use GRPO [3] as the underlying RL algorithm in our experiments, which is commonly used in capability-oriented LLM post-training [4]. We use Qwen2.5-7B-Instruct-1M [32] and Qwen2.5-3B-Instruct [32] in our experiments." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 247, + 506, + 292 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 506, + 292 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 506, + 292 + ], + "type": "text", + "content": "Datasets and Settings. Multiple datasets are used in our experiments, including Knights and Knaves (K&K) puzzle dataset [33], RuleTaker [34], ProofWriter [35], AR-LSAT [36], LogiQA [37], LogicNLI [38], LongICLBench [39], GSM-8K [40], and AIME 1983-2024 [21]. In our experiments, we consider three different settings. The prompt template used in shown in Figure 3 in the Appendix." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 297, + 506, + 373 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 297, + 506, + 373 + ], + "spans": [ + { + "bbox": [ + 104, + 297, + 506, + 373 + ], + "type": "text", + "content": "Setting 1: Post-training on " + }, + { + "bbox": [ + 104, + 297, + 506, + 373 + ], + "type": "inline_equation", + "content": "K \\& K" + }, + { + "bbox": [ + 104, + 297, + 506, + 373 + ], + "type": "text", + "content": " puzzles with varying character numbers. 
The Knights and Knaves (K&K) dataset [33] contains procedurally generated logic puzzles where each character is either a knight (always truthful) or a knave (always lying), and the goal is to infer each character's identity. The dataset supports fine-grained difficulty control by adjusting the number of characters. We generate puzzles with 3 to 14 characters, treating each character count as a separate distribution—yielding 12 distinct distributions. Each distribution includes 900 training and 100 test samples. We post-train Qwen2.5-7B-Instruct-1M on the combined dataset across all distributions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 378, + 506, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 378, + 506, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 378, + 506, + 456 + ], + "type": "text", + "content": "Setting 2: Post-training on diverse logic reasoning distributions. We perform post-training using a mixture of logic reasoning datasets, including RuleTaker [34], ProofWriter [35], AR-LSAT [36], LogiQA [37], LogicNLI [38], LongICLBench Geomotion [39], and Knights and Knaves (K&K) [33]. For RuleTaker, ProofWriter, and K&K, we further partition the data distributions by complexity levels: RuleTaker by 2, 3, and 5 required reasoning steps; ProofWriter by 3, 4, and 5 required reasoning steps; and K&K by the number of characters (3-7). In total, we construct 15 logic distributions, each containing 400 training samples. We use Qwen2.5-7B-Instruct-1M for this setting." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 460, + 506, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 460, + 506, + 526 + ], + "spans": [ + { + "bbox": [ + 104, + 460, + 506, + 526 + ], + "type": "text", + "content": "Setting 3: Post-training on diverse math reasoning distributions. We also explore post-training on diverse math data. For AIME, we split the data into four distributions based on competition years—1983–1993, 1994–2004, 2005–2015, and 2016–2024—since problem styles evolve significantly over time. We also include GSM-8K as a complementary math dataset. This results in five math distributions in total, with 7473 (GSM-8K), 124, 194, 283, and 238 training samples, respectively. We use Qwen2.5-3B-Instruct for this setting." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 531, + 506, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 531, + 506, + 587 + ], + "spans": [ + { + "bbox": [ + 104, + 531, + 506, + 587 + ], + "type": "text", + "content": "Reward Implementation. We adopt the rule-based reward mechanism Shao et al. [3] to provide stable and hack-resistant training signals during RL-based post-training and follow the detailed reward implementation in Logic-RL [11]. Specifically, each model response is expected to follow a structured format with the reasoning process enclosed in tags and the final answer enclosed in tags. The reward system consists of two components:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 132, + 597, + 506, + 668 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 132, + 597, + 504, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 597, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 132, + 597, + 504, + 631 + ], + "type": "text", + "content": "- Format Reward. A binary reward based on whether the output strictly adheres to the expected format. 
If the model includes exactly one well-formed and one section in the correct order, it receives a reward of +1; otherwise, it receives a penalty of -1." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 132, + 634, + 506, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 634, + 506, + 668 + ], + "spans": [ + { + "bbox": [ + 132, + 634, + 506, + 668 + ], + "type": "text", + "content": "- Answer Reward. We evaluate the correctness of the final answer. If the predicted identities fully match the ground truth, the model receives a reward of " + }, + { + "bbox": [ + 132, + 634, + 506, + 668 + ], + "type": "inline_equation", + "content": "+2" + }, + { + "bbox": [ + 132, + 634, + 506, + 668 + ], + "type": "text", + "content": "; if the answer is incorrect, -1.5; and if the answer is missing or unparsable, -2." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": "Other Implementation Details. All experiments are conducted on servers equipped with 8 Nvidia A100 GPUs. Our method is implemented with VeRL [41] LLM Reinforcement Learning framework. We use GRPO [3] as the training algorithm and follow standard practice for actor rollout and optimization. The actor learning rate is set to " + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "inline_equation", + "content": "1e - 6" + }, + { + "bbox": [ + 104, + 677, + 506, + 723 + ], + "type": "text", + "content": ", training batch size is set to 128, and the PPO" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 124, + 70, + 297, + 224 + ], + "blocks": [ + { + "bbox": [ + 124, + 70, + 297, + 224 + ], + "lines": [ + { + "bbox": [ + 124, + 70, + 297, + 224 + ], + "spans": [ + { + "bbox": [ + 124, + 70, + 297, + 224 + ], + "type": "table", + "html": "
<table><tr><td>Data Distribution</td><td>without DUMP</td><td>with DUMP</td></tr>
<tr><td>RuleTaker 2 Steps</td><td>0.79</td><td>0.79</td></tr>
<tr><td>RuleTaker 3 Steps</td><td>0.76</td><td>1.02</td></tr>
<tr><td>RuleTaker 5 Steps</td><td>0.56</td><td>0.98</td></tr>
<tr><td>ProofWriter 3 Steps</td><td>1.18</td><td>1.09</td></tr>
<tr><td>ProofWriter 4 Steps</td><td>0.97</td><td>1.09</td></tr>
<tr><td>ProofWriter 5 Steps</td><td>1.24</td><td>1.05</td></tr>
<tr><td>AR-LSAT</td><td>-0.70</td><td>-0.52</td></tr>
<tr><td>LogiQA</td><td>1.94</td><td>1.70</td></tr>
<tr><td>LogicNLI</td><td>-0.29</td><td>-0.23</td></tr>
<tr><td>LongICLBench Geomotion</td><td>0.54</td><td>0.25</td></tr>
<tr><td>K & K 3 Characters</td><td>2.00</td><td>2.00</td></tr>
<tr><td>K & K 4 Characters</td><td>1.54</td><td>1.76</td></tr>
<tr><td>K & K 5 Characters</td><td>1.53</td><td>1.84</td></tr>
<tr><td>K & K 6 Characters</td><td>0.83</td><td>1.42</td></tr>
<tr><td>K & K 7 Characters</td><td>0.56</td><td>1.02</td></tr>
<tr><td>Average</td><td>0.90</td><td>1.17</td></tr></table>
", + "image_path": "e6f0c0bb94a114b13ae70ae1245e9712158f506828d63f66fd321b5acaa082cd.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 339, + 110, + 486, + 184 + ], + "blocks": [ + { + "bbox": [ + 104, + 229, + 315, + 262 + ], + "lines": [ + { + "bbox": [ + 104, + 229, + 315, + 262 + ], + "spans": [ + { + "bbox": [ + 104, + 229, + 315, + 262 + ], + "type": "text", + "content": "Table 1: Test Answer Reward (see Section 4.1) on diverse logic reasoning distributions (Setting 2). The model used here is Qwen2.5-7B-Instruct-1M." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 339, + 110, + 486, + 184 + ], + "lines": [ + { + "bbox": [ + 339, + 110, + 486, + 184 + ], + "spans": [ + { + "bbox": [ + 339, + 110, + 486, + 184 + ], + "type": "table", + "html": "
<table><tr><td>Data Distribution</td><td>without DUMP</td><td>with DUMP</td></tr>
<tr><td>GSM-8K</td><td>1.50</td><td>1.47</td></tr>
<tr><td>AIME 1983-1993</td><td>-0.76</td><td>-0.39</td></tr>
<tr><td>AIME 1994-2004</td><td>-1.50</td><td>-1.02</td></tr>
<tr><td>AIME 2005-2015</td><td>-0.94</td><td>-0.94</td></tr>
<tr><td>AIME 2016-2024</td><td>-1.27</td><td>-1.27</td></tr>
<tr><td>Average</td><td>-0.59</td><td>-0.43</td></tr></table>
", + "image_path": "aa3785a5149d03ddc5b6795803c83eba1155b634ceaac84b5d8b464165a3bb38.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 320, + 190, + 504, + 234 + ], + "lines": [ + { + "bbox": [ + 320, + 190, + 504, + 234 + ], + "spans": [ + { + "bbox": [ + 320, + 190, + 504, + 234 + ], + "type": "text", + "content": "Table 2: Test Answer Reward (see Section 4.1) on diverse math reasoning distributions (Setting 3). The model used here is Qwen2.5-3B-Instruct." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 104, + 269, + 504, + 324 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 269, + 504, + 324 + ], + "spans": [ + { + "bbox": [ + 104, + 269, + 504, + 324 + ], + "type": "text", + "content": "mini-batch size is 32. KL divergence regularization is applied to encourage alignment with the reference policy, with a KL loss coefficient of 0.001. Each rollout batch contains 16 responses. If not specified, we allow for a maximum response length of 20480 and 4096 tokens during training for Qwen2.5-7B-Instruct-1M and Qwen2.5-3B-Instruct, respectively. The window size " + }, + { + "bbox": [ + 104, + 269, + 504, + 324 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 104, + 269, + 504, + 324 + ], + "type": "text", + "content": " and the temperature " + }, + { + "bbox": [ + 104, + 269, + 504, + 324 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 104, + 269, + 504, + 324 + ], + "type": "text", + "content": " in our curriculum learning framework is set to 300 and 0.1, respectively." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 105, + 332, + 231, + 342 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 332, + 231, + 342 + ], + "spans": [ + { + "bbox": [ + 105, + 332, + 231, + 342 + ], + "type": "text", + "content": "4.2 Effectiveness of DUMP" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 347, + 506, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 347, + 506, + 502 + ], + "spans": [ + { + "bbox": [ + 104, + 347, + 506, + 502 + ], + "type": "text", + "content": "Setting 1: Post-training on the combination of K&K puzzle datasets with different number of characters. To evaluate the effectiveness of DUMP in improving post-training efficiency and performance, we compare it against a uniform distribution sampling baseline across 12 distinct data distributions in the K&K puzzle dataset. Each distribution corresponds to a fixed number of characters in the puzzle, ranging from 3 to 14. Figure 1 plots the test answer reward over training steps for each distribution, with and without DUMP. Across all distributions, DUMP consistently outperforms the baseline, achieving faster convergence and higher test performance. The gains are particularly notable in mid- to high-difficulty distributions (e.g., 6 to 12 characters), where uniform sampling tends to struggle due to data underutilization. For example, in the 9-character distribution (Figure 1g), the model trained with DUMP achieves a reward of over 0.5, whereas the baseline remains below 0.0. These results validate the core intuition of DUMP: dynamically adjusting the sampling focus toward high-learnability distributions accelerates policy improvement while avoiding wasted effort on over-saturated or low-signal data. 
Notably, the improvement is achieved without any curriculum heuristics or manual data ordering—only by observing advantage signals and adapting online." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 104, + 506, + 504, + 583 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 506, + 504, + 583 + ], + "spans": [ + { + "bbox": [ + 104, + 506, + 504, + 583 + ], + "type": "text", + "content": "Setting 2: Post-training on diverse logic reasoning distributions. We apply DUMP to 15 logic reasoning distributions including subsets of RuleTaker, ProofWriter, and K&K (with varying difficulty levels), as well as datasets such as AR-LSAT, LogiQA, LogicNLI, and LongICLBench. As shown in Table 1, DUMP improves the average test answer reward from 0.90 to 1.17. Notable improvements are observed on complex tasks such as AR-LSAT, where the reward increases from -0.70 to -0.52, and K&K 7 Characters, from 0.56 to 1.02. These results demonstrate that DUMP adaptively prioritizes undertrained but learnable distributions, leading to more efficient capability gains." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 587, + 504, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 587, + 504, + 642 + ], + "spans": [ + { + "bbox": [ + 104, + 587, + 504, + 642 + ], + "type": "text", + "content": "Setting 3: Post-training on diverse math data distributions. We further evaluate DUMP on GSM-8K and different subsets of AIME grouped by competition years. As shown in Table 2, DUMP raises the average test answer reward from -0.59 to -0.43, with the most significant gain on AIME 1994-2004, where performance improves from -1.50 to -1.02. These results highlight DUMP's robustness under distribution shifts and data imbalance." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 105, + 651, + 305, + 663 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 651, + 305, + 663 + ], + "spans": [ + { + "bbox": [ + 105, + 651, + 305, + 663 + ], + "type": "text", + "content": "4.3 Ablation Study on the Sampling Strategy" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 666, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 666, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 666, + 504, + 723 + ], + "type": "text", + "content": "In this section, we ablate the sampling strategy used in DUMP's UCB-based scheduler. As described in Algorithm 1, our method applies soft sampling controlled by a temperature parameter. The greedy variant (temperature " + }, + { + "bbox": [ + 104, + 666, + 504, + 723 + ], + "type": "inline_equation", + "content": "= 0" + }, + { + "bbox": [ + 104, + 666, + 504, + 723 + ], + "type": "text", + "content": ") always selects the distribution with the highest UCB score, while our default uses a small temperature (0.1) to enable probabilistic sampling. We conduct experiments under Setting 1, with a maximum training response length of 10240 tokens. 
After 100 training steps, the" + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 71, + 236, + 156 + ], + "blocks": [ + { + "bbox": [ + 108, + 71, + 236, + 156 + ], + "lines": [ + { + "bbox": [ + 108, + 71, + 236, + 156 + ], + "spans": [ + { + "bbox": [ + 108, + 71, + 236, + 156 + ], + "type": "image", + "image_path": "2d84ad10a799a03d621f293c95c9751efb7f544fd948539b15f56498132f463c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 240, + 71, + 369, + 156 + ], + "blocks": [ + { + "bbox": [ + 240, + 71, + 369, + 156 + ], + "lines": [ + { + "bbox": [ + 240, + 71, + 369, + 156 + ], + "spans": [ + { + "bbox": [ + 240, + 71, + 369, + 156 + ], + "type": "image", + "image_path": "9e9ef5825d12f23ef4294c784c61776bb5627c910310930c8ca5388763eedfff.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 373, + 72, + 502, + 156 + ], + "blocks": [ + { + "bbox": [ + 373, + 72, + 502, + 156 + ], + "lines": [ + { + "bbox": [ + 373, + 72, + 502, + 156 + ], + "spans": [ + { + "bbox": [ + 373, + 72, + 502, + 156 + ], + "type": "image", + "image_path": "b42d5b9d03640e1b2d58067da1a5d77bcd6058d10d8acd393caffffa24d0c095.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 107, + 175, + 236, + 262 + ], + "blocks": [ + { + "bbox": [ + 143, + 163, + 202, + 174 + ], + "lines": [ + { + "bbox": [ + 143, + 163, + 202, + 174 + ], + "spans": [ + { + "bbox": [ + 143, + 163, + 202, + 174 + ], + "type": "text", + "content": "(a) 3 Characters" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 175, + 236, + 262 + ], + "lines": [ + { + "bbox": [ + 107, + 175, + 236, + 262 + ], + "spans": [ + { + "bbox": [ + 107, + 175, + 236, + 262 + ], + "type": "image", + "image_path": "b441adacc37480df5405e8d06df557db3f332d10da975d85a6a2fec50e4bcdca.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 240, + 175, + 369, + 261 + ], + "blocks": [ + { + "bbox": [ + 276, + 163, + 335, + 174 + ], + "lines": [ + { + "bbox": [ + 276, + 163, + 335, + 174 + ], + "spans": [ + { + "bbox": [ + 276, + 163, + 335, + 174 + ], + "type": "text", + "content": "(b) 4 Characters" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 240, + 175, + 369, + 261 + ], + "lines": [ + { + "bbox": [ + 240, + 175, + 369, + 261 + ], + "spans": [ + { + "bbox": [ + 240, + 175, + 369, + 261 + ], + "type": "image", + "image_path": "dd3c71bd214119d0d2c609783ec05f1acbc266fc90b30589cda8415312677dec.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 373, + 175, + 502, + 261 + ], + "blocks": [ + { + "bbox": [ + 408, + 163, + 468, + 174 + ], + "lines": [ + { + "bbox": [ + 408, + 163, + 468, + 174 + ], + "spans": [ + { + "bbox": [ + 408, + 163, + 468, + 174 + ], + 
"type": "text", + "content": "(c) 5 Characters" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 373, + 175, + 502, + 261 + ], + "lines": [ + { + "bbox": [ + 373, + 175, + 502, + 261 + ], + "spans": [ + { + "bbox": [ + 373, + 175, + 502, + 261 + ], + "type": "image", + "image_path": "90aa2351a348cc35cdc45b61d56ca9e75c94d18c9af73121a1b2516738e49c84.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 107, + 280, + 236, + 365 + ], + "blocks": [ + { + "bbox": [ + 143, + 267, + 202, + 277 + ], + "lines": [ + { + "bbox": [ + 143, + 267, + 202, + 277 + ], + "spans": [ + { + "bbox": [ + 143, + 267, + 202, + 277 + ], + "type": "text", + "content": "(d) 6 Characters" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 280, + 236, + 365 + ], + "lines": [ + { + "bbox": [ + 107, + 280, + 236, + 365 + ], + "spans": [ + { + "bbox": [ + 107, + 280, + 236, + 365 + ], + "type": "image", + "image_path": "7dba6855cb7a7d7cb4cefc1e6b8ce55ec45b710f83cbbc554a09682679d2d825.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 240, + 280, + 369, + 365 + ], + "blocks": [ + { + "bbox": [ + 276, + 268, + 334, + 277 + ], + "lines": [ + { + "bbox": [ + 276, + 268, + 334, + 277 + ], + "spans": [ + { + "bbox": [ + 276, + 268, + 334, + 277 + ], + "type": "text", + "content": "(e) 7 Characters" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 240, + 280, + 369, + 365 + ], + "lines": [ + { + "bbox": [ + 240, + 280, + 369, + 365 + ], + "spans": [ + { + "bbox": [ + 240, + 280, + 369, + 365 + ], + "type": "image", + "image_path": "e4805948e0cca244ad9a13762138150a45a99b18afcb9bb2522dc2b2b6595d89.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 373, + 280, + 502, + 365 + ], + "blocks": [ + { + "bbox": [ + 409, + 268, + 466, + 277 + ], + "lines": [ + { + "bbox": [ + 409, + 268, + 466, + 277 + ], + "spans": [ + { + "bbox": [ + 409, + 268, + 466, + 277 + ], + "type": "text", + "content": "(f) 8 Characters" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 373, + 280, + 502, + 365 + ], + "lines": [ + { + "bbox": [ + 373, + 280, + 502, + 365 + ], + "spans": [ + { + "bbox": [ + 373, + 280, + 502, + 365 + ], + "type": "image", + "image_path": "93f81e3b516330cf41e06a4aff20afde3f874be24fd483e9ec177162a0b412f2.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 107, + 384, + 236, + 469 + ], + "blocks": [ + { + "bbox": [ + 143, + 372, + 202, + 382 + ], + "lines": [ + { + "bbox": [ + 143, + 372, + 202, + 382 + ], + "spans": [ + { + "bbox": [ + 143, + 372, + 202, + 382 + ], + "type": "text", + "content": "(g) 9 Characters" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 384, + 236, + 469 + ], + "lines": [ + { + "bbox": [ + 107, + 384, + 236, + 469 + ], + "spans": [ + { + "bbox": [ + 107, + 384, + 236, + 469 + ], + "type": "image", + "image_path": "c347b6d78546ea72564dfbc09bebe1fe6fe727ec0282cdd102864b3145b966a8.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 141, + 476, + 203, + 486 + ], + "lines": [ + { + "bbox": [ + 
141, + 476, + 203, + 486 + ], + "spans": [ + { + "bbox": [ + 141, + 476, + 203, + 486 + ], + "type": "text", + "content": "(j) 12 Characters" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 492, + 504, + 526 + ], + "lines": [ + { + "bbox": [ + 104, + 492, + 504, + 526 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 504, + 526 + ], + "type": "text", + "content": "Figure 1: Effectiveness of DUMP on the K&K puzzle dataset mixed with 12 distributions defined by the number of characters in each puzzle (Setting 1). DUMP consistently achieves higher answer reward on test dataset compared to baseline. The model used here is Qwen2.5-7B-Instruct-1M." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 240, + 384, + 369, + 469 + ], + "blocks": [ + { + "bbox": [ + 273, + 372, + 337, + 381 + ], + "lines": [ + { + "bbox": [ + 273, + 372, + 337, + 381 + ], + "spans": [ + { + "bbox": [ + 273, + 372, + 337, + 381 + ], + "type": "text", + "content": "(h) 10 Characters" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 240, + 384, + 369, + 469 + ], + "lines": [ + { + "bbox": [ + 240, + 384, + 369, + 469 + ], + "spans": [ + { + "bbox": [ + 240, + 384, + 369, + 469 + ], + "type": "image", + "image_path": "c9c826488d46bd743a05c0379ad5bd3d7978b627c30c7548629b86defd59f338.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 273, + 476, + 337, + 485 + ], + "lines": [ + { + "bbox": [ + 273, + 476, + 337, + 485 + ], + "spans": [ + { + "bbox": [ + 273, + 476, + 337, + 485 + ], + "type": "text", + "content": "(k) 13 Characters" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 373, + 384, + 502, + 469 + ], + "blocks": [ + { + "bbox": [ + 407, + 372, + 468, + 381 + ], + "lines": [ + { + "bbox": [ + 407, + 372, + 468, + 381 + ], + "spans": [ + { + "bbox": [ + 407, + 372, + 468, + 381 + ], + "type": "text", + "content": "(i) 11 Characters" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 373, + 384, + 502, + 469 + ], + "lines": [ + { + "bbox": [ + 373, + 384, + 502, + 469 + ], + "spans": [ + { + "bbox": [ + 373, + 384, + 502, + 469 + ], + "type": "image", + "image_path": "065491244f9bdf1954390d6968e94ab48062efc337aab6989e5479b161bec53f.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 407, + 476, + 468, + 485 + ], + "lines": [ + { + "bbox": [ + 407, + 476, + 468, + 485 + ], + "spans": [ + { + "bbox": [ + 407, + 476, + 468, + 485 + ], + "type": "text", + "content": "(1) 14 Characters" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "spans": [ + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "text", + "content": "greedy strategy significantly underperforms due to its lack of exploration—it tends to lock onto a single distribution early and fails to adapt. 
For instance, on the 13- and 14-character K&K tasks, the greedy variant achieves test answer rewards of " + }, + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "inline_equation", + "content": "-0.91" + }, + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "inline_equation", + "content": "-1.38" + }, + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "text", + "content": ", while soft sampling reaches " + }, + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "inline_equation", + "content": "-0.66" + }, + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "inline_equation", + "content": "-1.16" + }, + { + "bbox": [ + 104, + 536, + 504, + 591 + ], + "type": "text", + "content": ", respectively. These results highlight the importance of maintaining exploration via a non-zero temperature to prevent the scheduler from collapsing onto suboptimal distributions." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 104, + 598, + 340, + 610 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 598, + 340, + 610 + ], + "spans": [ + { + "bbox": [ + 104, + 598, + 340, + 610 + ], + "type": "text", + "content": "4.4 Analyzing the Automated Curriculum by DUMP" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 612, + 506, + 723 + ], + "type": "text", + "content": "To understand how DUMP dynamically allocates training effort across data distributions, we analyze the sampling patterns induced by its UCB-based curriculum mechanism. Figure 2 shows the cumulative number of samples drawn from each distribution (3 to 14 characters) over the course of training on K&K puzzles with varying character numbers (Setting 1). We observe a clear curriculum-like progression: distributions corresponding to simpler puzzles (e.g., 3-5 characters) are heavily sampled in the early stages of training, while more complex distributions (e.g., 10-14 characters) are gradually introduced and increasingly prioritized as training progresses. This pattern aligns with the model's evolving capacity—early training favors distributions with high initial advantage magnitudes, and as the model saturates on those, DUMP shifts focus to underexplored but learnable distributions. 
Importantly, this adaptive sampling behavior emerges automatically from empirical advantage signals" + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 108, + 71, + 236, + 156 + ], + "blocks": [ + { + "bbox": [ + 108, + 71, + 236, + 156 + ], + "lines": [ + { + "bbox": [ + 108, + 71, + 236, + 156 + ], + "spans": [ + { + "bbox": [ + 108, + 71, + 236, + 156 + ], + "type": "image", + "image_path": "6cc6f36cb334ce3d8187d78cccfbbefe6196ca96877d726115347dfe7c271556.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 240, + 71, + 369, + 156 + ], + "blocks": [ + { + "bbox": [ + 240, + 71, + 369, + 156 + ], + "lines": [ + { + "bbox": [ + 240, + 71, + 369, + 156 + ], + "spans": [ + { + "bbox": [ + 240, + 71, + 369, + 156 + ], + "type": "image", + "image_path": "ed5ee9bc8d30f66370b8860be771fe2239d69990f22f96124e7b422658a99402.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 373, + 72, + 502, + 156 + ], + "blocks": [ + { + "bbox": [ + 373, + 72, + 502, + 156 + ], + "lines": [ + { + "bbox": [ + 373, + 72, + 502, + 156 + ], + "spans": [ + { + "bbox": [ + 373, + 72, + 502, + 156 + ], + "type": "image", + "image_path": "1093ff72a142fb03fb9d135eb344f89dcf33d2036e5144a4d0fbeb8fa8dcf81e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 107, + 175, + 236, + 262 + ], + "blocks": [ + { + "bbox": [ + 143, + 163, + 202, + 174 + ], + "lines": [ + { + "bbox": [ + 143, + 163, + 202, + 174 + ], + "spans": [ + { + "bbox": [ + 143, + 163, + 202, + 174 + ], + "type": "text", + "content": "(a) 3 Characters" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 175, + 236, + 262 + ], + "lines": [ + { + "bbox": [ + 107, + 175, + 236, + 262 + ], + "spans": [ + { + "bbox": [ + 107, + 175, + 236, + 262 + ], + "type": "image", + "image_path": "3e862a18440592fab0f8c83e8df6b59f79b756e68b5338018918ce42306eedfc.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 240, + 175, + 369, + 261 + ], + "blocks": [ + { + "bbox": [ + 276, + 163, + 335, + 174 + ], + "lines": [ + { + "bbox": [ + 276, + 163, + 335, + 174 + ], + "spans": [ + { + "bbox": [ + 276, + 163, + 335, + 174 + ], + "type": "text", + "content": "(b) 4 Characters" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 240, + 175, + 369, + 261 + ], + "lines": [ + { + "bbox": [ + 240, + 175, + 369, + 261 + ], + "spans": [ + { + "bbox": [ + 240, + 175, + 369, + 261 + ], + "type": "image", + "image_path": "bd7a606c29b4217534d1180cc89eb34e1ab773d0c9d135da70642a335ccb9d53.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 373, + 175, + 502, + 261 + ], + "blocks": [ + { + "bbox": [ + 408, + 163, + 468, + 174 + ], + "lines": [ + { + "bbox": [ + 408, + 163, + 468, + 174 + 
], + "spans": [ + { + "bbox": [ + 408, + 163, + 468, + 174 + ], + "type": "text", + "content": "(c) 5 Characters" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 373, + 175, + 502, + 261 + ], + "lines": [ + { + "bbox": [ + 373, + 175, + 502, + 261 + ], + "spans": [ + { + "bbox": [ + 373, + 175, + 502, + 261 + ], + "type": "image", + "image_path": "e28d82a879c78675b5c68c359b5de85522359e36fb32c3bf92a91e0ba286337c.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 107, + 279, + 236, + 365 + ], + "blocks": [ + { + "bbox": [ + 143, + 267, + 202, + 277 + ], + "lines": [ + { + "bbox": [ + 143, + 267, + 202, + 277 + ], + "spans": [ + { + "bbox": [ + 143, + 267, + 202, + 277 + ], + "type": "text", + "content": "(d) 6 Characters" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 279, + 236, + 365 + ], + "lines": [ + { + "bbox": [ + 107, + 279, + 236, + 365 + ], + "spans": [ + { + "bbox": [ + 107, + 279, + 236, + 365 + ], + "type": "image", + "image_path": "dbd799c502135303951605d7513f02860067f2b1e487881b6cb98b637df36867.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 240, + 280, + 369, + 365 + ], + "blocks": [ + { + "bbox": [ + 276, + 267, + 335, + 277 + ], + "lines": [ + { + "bbox": [ + 276, + 267, + 335, + 277 + ], + "spans": [ + { + "bbox": [ + 276, + 267, + 335, + 277 + ], + "type": "text", + "content": "(e) 7 Characters" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 240, + 280, + 369, + 365 + ], + "lines": [ + { + "bbox": [ + 240, + 280, + 369, + 365 + ], + "spans": [ + { + "bbox": [ + 240, + 280, + 369, + 365 + ], + "type": "image", + "image_path": "8e0df7b6628cce08d19fc414d2c8e28e73e39228bfd705f4e6fc8edb1552b07e.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 373, + 280, + 502, + 365 + ], + "blocks": [ + { + "bbox": [ + 408, + 267, + 467, + 277 + ], + "lines": [ + { + "bbox": [ + 408, + 267, + 467, + 277 + ], + "spans": [ + { + "bbox": [ + 408, + 267, + 467, + 277 + ], + "type": "text", + "content": "(f) 8 Characters" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 373, + 280, + 502, + 365 + ], + "lines": [ + { + "bbox": [ + 373, + 280, + 502, + 365 + ], + "spans": [ + { + "bbox": [ + 373, + 280, + 502, + 365 + ], + "type": "image", + "image_path": "2aeecd41df07eb9cbc6a903e8cdfeb111270dc26d63b455ee2507cce81943392.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 107, + 383, + 236, + 469 + ], + "blocks": [ + { + "bbox": [ + 143, + 372, + 202, + 382 + ], + "lines": [ + { + "bbox": [ + 143, + 372, + 202, + 382 + ], + "spans": [ + { + "bbox": [ + 143, + 372, + 202, + 382 + ], + "type": "text", + "content": "(g) 9 Characters" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 107, + 383, + 236, + 469 + ], + "lines": [ + { + "bbox": [ + 107, + 383, + 236, + 469 + ], + "spans": [ + { + "bbox": [ + 107, + 383, + 236, + 469 + ], + "type": "image", + "image_path": "33dda47032ad4c964efebd5b8681521466c3fa0dc248e1d8fc4faf3f6cfeb8ab.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + }, + { + 
"bbox": [ + 141, + 475, + 203, + 486 + ], + "lines": [ + { + "bbox": [ + 141, + 475, + 203, + 486 + ], + "spans": [ + { + "bbox": [ + 141, + 475, + 203, + 486 + ], + "type": "text", + "content": "(j) 12 Characters" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 104, + 492, + 506, + 548 + ], + "lines": [ + { + "bbox": [ + 104, + 492, + 506, + 548 + ], + "spans": [ + { + "bbox": [ + 104, + 492, + 506, + 548 + ], + "type": "text", + "content": "Figure 2: Curriculum (sample counts) induced by DUMP across 12 K&K puzzle distributions with increasing difficulty defined by the number of characters in each puzzle (Setting 1). Simpler distributions are automatically prioritized in early training, while more complex ones are progressively emphasized—both in an entirely automated manner—demonstrating automated distribution scheduling." + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 240, + 383, + 369, + 469 + ], + "blocks": [ + { + "bbox": [ + 272, + 372, + 337, + 381 + ], + "lines": [ + { + "bbox": [ + 272, + 372, + 337, + 381 + ], + "spans": [ + { + "bbox": [ + 272, + 372, + 337, + 381 + ], + "type": "text", + "content": "(h) 10 Characters" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 240, + 383, + 369, + 469 + ], + "lines": [ + { + "bbox": [ + 240, + 383, + 369, + 469 + ], + "spans": [ + { + "bbox": [ + 240, + 383, + 369, + 469 + ], + "type": "image", + "image_path": "f410bc2ba3a4e44d2bc0ad93a48a6847bf24a13c070787cb53ce0a6504aa2275.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 272, + 475, + 337, + 486 + ], + "lines": [ + { + "bbox": [ + 272, + 475, + 337, + 486 + ], + "spans": [ + { + "bbox": [ + 272, + 475, + 337, + 486 + ], + "type": "text", + "content": "(k) 13 Characters" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 373, + 383, + 502, + 469 + ], + "blocks": [ + { + "bbox": [ + 407, + 372, + 468, + 381 + ], + "lines": [ + { + "bbox": [ + 407, + 372, + 468, + 381 + ], + "spans": [ + { + "bbox": [ + 407, + 372, + 468, + 381 + ], + "type": "text", + "content": "(i) 11 Characters" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 373, + 383, + 502, + 469 + ], + "lines": [ + { + "bbox": [ + 373, + 383, + 502, + 469 + ], + "spans": [ + { + "bbox": [ + 373, + 383, + 502, + 469 + ], + "type": "image", + "image_path": "767d20fa7a31ff3d7e8d2f7693228a3d1141ed28620a51d6d9c94e599e75bd3c.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 407, + 475, + 468, + 485 + ], + "lines": [ + { + "bbox": [ + 407, + 475, + 468, + 485 + ], + "spans": [ + { + "bbox": [ + 407, + 475, + 468, + 485 + ], + "type": "text", + "content": "(1) 14 Characters" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_caption" + } + ], + "index": 22 + }, + { + "bbox": [ + 104, + 567, + 504, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 567, + 504, + 601 + ], + "spans": [ + { + "bbox": [ + 104, + 567, + 504, + 601 + ], + "type": "text", + "content": "without requiring manual specification of curriculum order. 
These results highlight DUMP's ability to construct an implicit, data-driven curriculum that mirrors traditional easy-to-hard strategies, while remaining responsive to online training dynamics." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 105, + 620, + 185, + 632 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 620, + 185, + 632 + ], + "spans": [ + { + "bbox": [ + 105, + 620, + 185, + 632 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 645, + 506, + 723 + ], + "type": "text", + "content": "In this work, we introduce a distribution-level curriculum learning framework for RL-based posttraining of large language models. DUMP leverages the expected absolute advantage as a learnability signal to adaptively allocate training focus across heterogeneous distributions. By formalizing scheduling as a multi-armed bandit and adopting a UCB-based sampling strategy, DUMP balances exploitation and exploration in a principled way. Experiments demonstrate that DUMP consistently improves convergence and final performance over baselines. These results highlight the value of distribution-aware curriculum learning in LLM RL post-training." + } + ] + } + ], + "index": 27 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 28 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 71, + 165, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 71, + 165, + 83 + ], + "spans": [ + { + "bbox": [ + 106, + 71, + 165, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 90, + 505, + 721 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 111, + 90, + 505, + 134 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 90, + 505, + 134 + ], + "spans": [ + { + "bbox": [ + 111, + 90, + 505, + 134 + ], + "type": "text", + "content": "[1] Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744, 2022." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 111, + 144, + 505, + 178 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 144, + 505, + 178 + ], + "spans": [ + { + "bbox": [ + 111, + 144, + 505, + 178 + ], + "type": "text", + "content": "[2] Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. Direct preference optimization: Your language model is secretly a reward model. Advances in Neural Information Processing Systems, 36:53728-53741, 2023." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 111, + 186, + 505, + 220 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 186, + 505, + 220 + ], + "spans": [ + { + "bbox": [ + 111, + 186, + 505, + 220 + ], + "type": "text", + "content": "[3] Zhihong Shao, Peiyi Wang, Qihao Zhu, Runxin Xu, Junxiao Song, Xiao Bi, Haowei Zhang, Mingchuan Zhang, YK Li, Y Wu, et al. Deepseekmath: Pushing the limits of mathematical reasoning in open language models. arXiv preprint arXiv:2402.03300, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 111, + 229, + 505, + 263 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 229, + 505, + 263 + ], + "spans": [ + { + "bbox": [ + 111, + 229, + 505, + 263 + ], + "type": "text", + "content": "[4] Daya Guo, Dejian Yang, Haowei Zhang, Junxiao Song, Ruoyu Zhang, Runxin Xu, Qihao Zhu, Shirong Ma, Peiyi Wang, Xiao Bi, et al. Deepseek-r1: Incentivizing reasoning capability in llms via reinforcement learning. arXiv preprint arXiv:2501.12948, 2025." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 111, + 272, + 505, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 272, + 505, + 316 + ], + "spans": [ + { + "bbox": [ + 111, + 272, + 505, + 316 + ], + "type": "text", + "content": "[5] Shayne Longpre, Le Hou, Tu Vu, Albert Webson, Hyung Won Chung, Yi Tay, Denny Zhou, Quoc V Le, Barret Zoph, Jason Wei, et al. The flan collection: Designing data and methods for effective instruction tuning. In International Conference on Machine Learning, pages 22631-22648. PMLR, 2023." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 111, + 325, + 505, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 325, + 505, + 360 + ], + "spans": [ + { + "bbox": [ + 111, + 325, + 505, + 360 + ], + "type": "text", + "content": "[6] Harrison Lee, Samrat Phatale, Hassan Mansoor, Thomas Mesnard, Johan Ferret, Kellie Lu, Colton Bishop, Ethan Hall, Victor Carbune, Abhinav Rastogi, et al. Rlaif: Scaling reinforcement learning from human feedback with ai feedback. arXiv e-prints, pages arXiv-2309, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 111, + 369, + 505, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 369, + 505, + 403 + ], + "spans": [ + { + "bbox": [ + 111, + 369, + 505, + 403 + ], + "type": "text", + "content": "[7] Nathan Lambert, Jacob Morrison, Valentina Pyatkin, Shengyi Huang, Hamish Ivison, Faeze Brahman, Lester James V Miranda, Alisa Liu, Nouha Dziri, Shane Lyu, et al. T\\''ulu 3: Pushing frontiers in open language model post-training. arXiv preprint arXiv:2411.15124, 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 111, + 411, + 505, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 411, + 505, + 445 + ], + "spans": [ + { + "bbox": [ + 111, + 411, + 505, + 445 + ], + "type": "text", + "content": "[8] ByteDance Seed. Seed-thinking-v1.5: Advancing superb reasoning models with reinforcement learning. Technical report, ByteDance, 2025. URL https://github.com/ByteDance-Seed/Seed-Thinking-v1.5." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 111, + 453, + 505, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 111, + 453, + 505, + 497 + ], + "spans": [ + { + "bbox": [ + 111, + 453, + 505, + 497 + ], + "type": "text", + "content": "[9] Pulkit Pattnaik, Rishabh Maheshwary, Kelechi Ogueji, Vikas Yadav, and Sathwik Tejaswi Madhusudhan. Enhancing alignment using curriculum learning & ranked preferences. In Findings of the Association for Computational Linguistics: EMNLP 2024, pages 12891-12907, 2024." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 506, + 505, + 541 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 506, + 505, + 541 + ], + "spans": [ + { + "bbox": [ + 106, + 506, + 505, + 541 + ], + "type": "text", + "content": "[10] Kimi Team, Angang Du, Bofei Gao, Bowei Xing, Changjiu Jiang, Cheng Chen, Cheng Li, Chenjun Xiao, Chenzhuang Du, Chonghua Liao, et al. Kimi k1. 5: Scaling reinforcement learning with llms. arXiv preprint arXiv:2501.12599, 2025." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 548, + 505, + 584 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 548, + 505, + 584 + ], + "spans": [ + { + "bbox": [ + 106, + 548, + 505, + 584 + ], + "type": "text", + "content": "[11] Tian Xie, Zitian Gao, Qingnan Ren, Haoming Luo, Yuqian Hong, Bryan Dai, Joey Zhou, Kai Qiu, Zhirong Wu, and Chong Luo. Logic-rl: Unleashing llm reasoning with rule-based reinforcement learning. arXiv preprint arXiv:2502.14768, 2025." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 592, + 505, + 626 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 592, + 505, + 626 + ], + "spans": [ + { + "bbox": [ + 106, + 592, + 505, + 626 + ], + "type": "text", + "content": "[12] Paul F Christiano, Jan Leike, Tom Brown, Miljan Martic, Shane Legg, and Dario Amodei. Deep reinforcement learning from human preferences. Advances in neural information processing systems, 30, 2017." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 635, + 505, + 669 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 635, + 505, + 669 + ], + "spans": [ + { + "bbox": [ + 106, + 635, + 505, + 669 + ], + "type": "text", + "content": "[13] Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593, 2019." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 106, + 677, + 505, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 677, + 505, + 721 + ], + "spans": [ + { + "bbox": [ + 106, + 677, + 505, + 721 + ], + "type": "text", + "content": "[14] Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862, 2022." 
+ } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 72, + 506, + 721 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "text", + "content": "[15] Amelia Glaese, Nat McAleese, Maja Trebacz, John Aslanides, Vlad Firoiu, Timo Ewalds, Maribeth Rauh, Laura Weidinger, Martin Chadwick, Phoebe Thacker, et al. Improving alignment of dialogue agents via targeted human judgements. arXiv preprint arXiv:2209.14375, 2022." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 114, + 506, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 114, + 506, + 137 + ], + "spans": [ + { + "bbox": [ + 106, + 114, + 506, + 137 + ], + "type": "text", + "content": "[16] OpenAI. Learning to reason with llms. Technical report, OpenAI, 2024. URL https://openai.com/index/learning-to-reason-with-llms/." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 144, + 506, + 167 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 144, + 506, + 167 + ], + "spans": [ + { + "bbox": [ + 107, + 144, + 506, + 167 + ], + "type": "text", + "content": "[17] OpenAI. Openai o3-mini. Technical report, OpenAI, 2025. URL https://openai.com/index/openai-o3-mini/." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 175, + 504, + 209 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 175, + 504, + 209 + ], + "spans": [ + { + "bbox": [ + 106, + 175, + 504, + 209 + ], + "type": "text", + "content": "[18] Ahmed El-Kishky, Alexander Wei, Andre Saraiva, Borys Minaiev, Daniel Selsam, David Dohan, Francis Song, Hunter Lightman, Ignasi Clavera, Jakub Pachocki, et al. Competitive programming with large reasoning models. arXiv preprint arXiv:2502.06807, 2025." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 216, + 506, + 250 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 216, + 506, + 250 + ], + "spans": [ + { + "bbox": [ + 105, + 216, + 506, + 250 + ], + "type": "text", + "content": "[19] Aaron Hurst, Adam Lerer, Adam P Goucher, Adam Perelman, Aditya Ramesh, Aidan Clark, AJ Ostrow, Akila Welihinda, Alan Hayes, Alec Radford, et al. Gpt-4o system card. arXiv preprint arXiv:2410.21276, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 106, + 257, + 506, + 291 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 257, + 506, + 291 + ], + "spans": [ + { + "bbox": [ + 106, + 257, + 506, + 291 + ], + "type": "text", + "content": "[20] Aixin Liu, Bei Feng, Bing Xue, Bingxuan Wang, Bochao Wu, Chengda Lu, Chenggang Zhao, Chengqi Deng, Chenyu Zhang, Chong Ruan, et al. Deepseek-v3 technical report. arXiv preprint arXiv:2412.19437, 2024." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 105, + 298, + 506, + 322 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 298, + 506, + 322 + ], + "spans": [ + { + "bbox": [ + 105, + 298, + 506, + 322 + ], + "type": "text", + "content": "[21] Aime_1983_2024 (revision 6283828), 2025. URL https://huggingface.co/datasets/di-zhang-fdu/AIME_1983_2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 105, + 328, + 488, + 341 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 328, + 488, + 341 + ], + "spans": [ + { + "bbox": [ + 105, + 328, + 488, + 341 + ], + "type": "text", + "content": "[22] Mikhail Mirzayanov. Codeforces. https://codeforces.com/. Accessed: 2025-04-13." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 106, + 348, + 504, + 372 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 348, + 504, + 372 + ], + "spans": [ + { + "bbox": [ + 106, + 348, + 504, + 372 + ], + "type": "text", + "content": "[23] John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347, 2017." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 106, + 379, + 506, + 412 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 379, + 506, + 412 + ], + "spans": [ + { + "bbox": [ + 106, + 379, + 506, + 412 + ], + "type": "text", + "content": "[24] Yoshua Bengio, Jérôme Louradour, Ronan Collobert, and Jason Weston. Curriculum learning. In Proceedings of the 26th annual international conference on machine learning, pages 41-48, 2009." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 106, + 420, + 506, + 454 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 420, + 506, + 454 + ], + "spans": [ + { + "bbox": [ + 106, + 420, + 506, + 454 + ], + "type": "text", + "content": "[25] Alex Graves, Marc G Bellemare, Jacob Menick, Remi Munos, and Koray Kavukcuoglu. Automated curriculum learning for neural networks. In international conference on machine learning, pages 1311-1320. Pmlr, 2017." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 106, + 461, + 504, + 496 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 461, + 504, + 496 + ], + "spans": [ + { + "bbox": [ + 106, + 461, + 504, + 496 + ], + "type": "text", + "content": "[26] Niels Justesen, Ruben Rodriguez Torrado, Philip Bontrager, Ahmed Khalifa, Julian Togelius, and Sebastian Risi. Illuminating generalization in deep reinforcement learning through procedural level generation. arXiv preprint arXiv:1806.10729, 2018." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 106, + 502, + 504, + 537 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 502, + 504, + 537 + ], + "spans": [ + { + "bbox": [ + 106, + 502, + 504, + 537 + ], + "type": "text", + "content": "[27] Rui Wang, Joel Lehman, Jeff Clune, and Kenneth O Stanley. Paired open-ended trailblazer (poet): Endlessly generating increasingly complex and diverse learning environments and their solutions. arXiv preprint arXiv:1901.01753, 2019." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 106, + 544, + 504, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 544, + 504, + 578 + ], + "spans": [ + { + "bbox": [ + 106, + 544, + 504, + 578 + ], + "type": "text", + "content": "[28] Richard Li, Allan Jabri, Trevor Darrell, and Pulkit Agrawal. 
Towards practical multi-object manipulation using relational reinforcement learning. In 2020 IEEE international conference on robotics and automation (icra), pages 4051-4058. IEEE, 2020." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 105, + 586, + 506, + 609 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 586, + 506, + 609 + ], + "spans": [ + { + "bbox": [ + 105, + 586, + 506, + 609 + ], + "type": "text", + "content": "[29] Tambet Matiisen, Avital Oliver, Taco Cohen, and John Schulman. Teacher-student curriculum learning. IEEE transactions on neural networks and learning systems, 31(9):3732-3740, 2019." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 106, + 616, + 504, + 651 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 616, + 504, + 651 + ], + "spans": [ + { + "bbox": [ + 106, + 616, + 504, + 651 + ], + "type": "text", + "content": "[30] Rémy Portelas, Cédric Colas, Katja Hofmann, and Pierre-Yves Oudeyer. Teacher algorithms for curriculum learning of deep rl in continuously parameterized environments. In Conference on Robot Learning, pages 835-853. PMLR, 2020." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 105, + 658, + 504, + 681 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 658, + 504, + 681 + ], + "spans": [ + { + "bbox": [ + 105, + 658, + 504, + 681 + ], + "type": "text", + "content": "[31] Peter Auer, Nicolo Cesa-Bianchi, and Paul Fischer. Finite-time analysis of the multiarmed bandit problem. Machine learning, 47:235-256, 2002." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 106, + 689, + 504, + 721 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 689, + 504, + 721 + ], + "spans": [ + { + "bbox": [ + 106, + 689, + 504, + 721 + ], + "type": "text", + "content": "[32] An Yang, Baosong Yang, Beichen Zhang, Binyuan Hui, Bo Zheng, Bowen Yu, Chengyuan Li, Dayiheng Liu, Fei Huang, Haoran Wei, et al. Qwen2.5 technical report. arXiv preprint arXiv:2412.15115, 2024." + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 410 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "spans": [ + { + "bbox": [ + 106, + 72, + 506, + 106 + ], + "type": "text", + "content": "[33] Chulin Xie, Yangsibo Huang, Chiyuan Zhang, Da Yu, Xinyun Chen, Bill Yuchen Lin, Bo Li, Badih Ghazi, and Ravi Kumar. On memorization of large language models in logical reasoning. arXiv preprint arXiv:2410.23123, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 106, + 112, + 506, + 136 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 112, + 506, + 136 + ], + "spans": [ + { + "bbox": [ + 106, + 112, + 506, + 136 + ], + "type": "text", + "content": "[34] Peter Clark, Oyvind Tafjord, and Kyle Richardson. Transformers as soft reasoners over language. arXiv preprint arXiv:2002.05867, 2020." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 107, + 142, + 506, + 166 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 142, + 506, + 166 + ], + "spans": [ + { + "bbox": [ + 107, + 142, + 506, + 166 + ], + "type": "text", + "content": "[35] Oyvind Tafjord, Bhavana Dalvi Mishra, and Peter Clark. Proofwriter: Generating implications, proofs, and abductive statements over natural language. arXiv preprint arXiv:2012.13048, 2020." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 106, + 172, + 504, + 205 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 172, + 504, + 205 + ], + "spans": [ + { + "bbox": [ + 106, + 172, + 504, + 205 + ], + "type": "text", + "content": "[36] Wanjun Zhong, Siyuan Wang, Duyu Tang, Zenan Xu, Daya Guo, Jiahai Wang, Jian Yin, Ming Zhou, and Nan Duan. Ar-lsat: Investigating analytical reasoning of text. arXiv preprint arXiv:2104.06598, 2021." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 107, + 213, + 504, + 246 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 213, + 504, + 246 + ], + "spans": [ + { + "bbox": [ + 107, + 213, + 504, + 246 + ], + "type": "text", + "content": "[37] Jian Liu, Leyang Cui, Hanmeng Liu, Dandan Huang, Yile Wang, and Yue Zhang. Logiqa: A challenge dataset for machine reading comprehension with logical reasoning. arXiv preprint arXiv:2007.08124, 2020." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 107, + 254, + 504, + 288 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 254, + 504, + 288 + ], + "spans": [ + { + "bbox": [ + 107, + 254, + 504, + 288 + ], + "type": "text", + "content": "[38] Jidong Tian, Yitian Li, Wenqing Chen, Liqiang Xiao, Hao He, and Yaohui Jin. Diagnosing the first-order logical reasoning ability through logicli. In Proceedings of the 2021 Conference on Empirical Methods in Natural Language Processing, pages 3738-3747, 2021." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 107, + 294, + 504, + 318 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 294, + 504, + 318 + ], + "spans": [ + { + "bbox": [ + 107, + 294, + 504, + 318 + ], + "type": "text", + "content": "[39] Tianle Li, Ge Zhang, Quy Duc Do, Xiang Yue, and Wenhu Chen. Long-context llms struggle with long in-context learning. arXiv preprint arXiv:2404.02060, 2024." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 106, + 324, + 506, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 106, + 324, + 506, + 367 + ], + "spans": [ + { + "bbox": [ + 106, + 324, + 506, + 367 + ], + "type": "text", + "content": "[40] Karl Cobbe, Vineet Kosaraju, Mohammad Bavarian, Mark Chen, Heewoo Jun, Lukasz Kaiser, Matthias Plappert, Jerry Tworek, Jacob Hilton, Reiichiro Nakano, Christopher Hesse, and John Schulman. Training verifiers to solve math word problems. arXiv preprint arXiv:2110.14168, 2021." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 107, + 375, + 504, + 410 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 107, + 375, + 504, + 410 + ], + "spans": [ + { + "bbox": [ + 107, + 375, + 504, + 410 + ], + "type": "text", + "content": "[41] Guangming Sheng, Chi Zhang, Zilingfeng Ye, Xibin Wu, Wang Zhang, Ru Zhang, Yanghua Peng, Haibin Lin, and Chuan Wu. Hybridflow: A flexible and efficient rlhf framework. arXiv preprint arXiv: 2409.19256, 2024." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 105, + 71, + 242, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 71, + 242, + 83 + ], + "spans": [ + { + "bbox": [ + 105, + 71, + 242, + 83 + ], + "type": "text", + "content": "A Proof for Theorem 3.1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "spans": [ + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": "Theorem A.1 (Expected Advantage Magnitude Reflects Learnability). Given a policy " + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": " and a data distribution " + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": ", the expected absolute advantage " + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\mathbb{E}_{x\\sim d}\\left[\\mathbb{E}_{o_i\\sim \\pi_\\theta (\\cdot |x)}\\left[|\\hat{A}_i|\\right]\\right]" + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": " serves as a proxy for how much that distribution " + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": " can help the model improve, where the distribution " + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": " consisting of prompts " + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "inline_equation", + "content": "x\\sim d" + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": ", each prompt has a group of sampled outputs " + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\{o_1,\\ldots ,o_n\\}" + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": ", and " + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "inline_equation", + "content": "\\hat{A}_i" + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": " denotes the advantage of output " + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 95, + 506, + 160 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "spans": [ + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "text", + "content": "Proof. 
Let " + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "text", + "content": " be the current model policy. Consider a data distribution " + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "inline_equation", + "content": "x \\sim d" + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "text", + "content": " are prompts and " + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "inline_equation", + "content": "\\{o_1, \\ldots, o_n\\} \\sim \\pi_{\\theta}(\\cdot | x)" + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "text", + "content": " are sampled outputs. For each output " + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 171, + 504, + 194 + ], + "type": "text", + "content": ", the advantage is estimated as" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 271, + 198, + 339, + 212 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 271, + 198, + 339, + 212 + ], + "spans": [ + { + "bbox": [ + 271, + 198, + 339, + 212 + ], + "type": "interline_equation", + "content": "\\hat {A} _ {i} = r _ {i} - b (x),", + "image_path": "79f9c919c43934b19c402e03573f142657eebe1ce865728baa48e24d0c55be98.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 104, + 216, + 506, + 239 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 216, + 506, + 239 + ], + "spans": [ + { + "bbox": [ + 104, + 216, + 506, + 239 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 216, + 506, + 239 + ], + "type": "inline_equation", + "content": "r_i" + }, + { + "bbox": [ + 104, + 216, + 506, + 239 + ], + "type": "text", + "content": " is the reward assigned to " + }, + { + "bbox": [ + 104, + 216, + 506, + 239 + ], + "type": "inline_equation", + "content": "o_i" + }, + { + "bbox": [ + 104, + 216, + 506, + 239 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 104, + 216, + 506, + 239 + ], + "type": "inline_equation", + "content": "b(x)" + }, + { + "bbox": [ + 104, + 216, + 506, + 239 + ], + "type": "text", + "content": " is a baseline (e.g., the mean reward over the group). 
The policy gradient under common policy-gradient methods (e.g., PPO or GRPO) can be written as:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 188, + 243, + 419, + 264 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 188, + 243, + 419, + 264 + ], + "spans": [ + { + "bbox": [ + 188, + 243, + 419, + 264 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} \\mathcal {J} (\\theta) = \\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o _ {i} \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ \\hat {A} _ {i} \\cdot \\nabla_ {\\theta} \\log \\pi_ {\\theta} (o _ {i} \\mid x) \\right] \\right].", + "image_path": "5e98d8f8024b264445999ca7a7e4f5e5107c60f782875c981cbcda40fc3bb2ed.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 272, + 504, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 272, + 504, + 295 + ], + "spans": [ + { + "bbox": [ + 104, + 272, + 504, + 295 + ], + "type": "text", + "content": "Now consider the magnitude of the gradient vector. The strength of the training signal from " + }, + { + "bbox": [ + 104, + 272, + 504, + 295 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 272, + 504, + 295 + ], + "type": "text", + "content": " depends on the expected norm of the gradient, which is bounded below by:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 176, + 299, + 432, + 321 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 299, + 432, + 321 + ], + "spans": [ + { + "bbox": [ + 176, + 299, + 432, + 321 + ], + "type": "interline_equation", + "content": "\\left\\| \\nabla_ {\\theta} \\mathcal {J} (\\theta) \\right\\| \\gtrsim \\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o _ {i} \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ | \\hat {A} _ {i} | \\cdot \\| \\nabla_ {\\theta} \\log \\pi_ {\\theta} (o _ {i} \\mid x) \\| \\right] \\right].", + "image_path": "fa48ca1b252482ec6ec16d8ea638822a6fb14e9c29d445c5b397181341cfa44e.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 328, + 504, + 351 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 328, + 504, + 351 + ], + "spans": [ + { + "bbox": [ + 104, + 328, + 504, + 351 + ], + "type": "text", + "content": "Assuming that " + }, + { + "bbox": [ + 104, + 328, + 504, + 351 + ], + "type": "inline_equation", + "content": "\\| \\nabla_{\\theta}\\log \\pi_{\\theta}(o_i\\mid x)\\|" + }, + { + "bbox": [ + 104, + 328, + 504, + 351 + ], + "type": "text", + "content": " is bounded and varies slowly across " + }, + { + "bbox": [ + 104, + 328, + 504, + 351 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 328, + 504, + 351 + ], + "type": "text", + "content": ", the dominant term affecting the gradient norm is:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 247, + 350, + 361, + 371 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 247, + 350, + 361, + 371 + ], + "spans": [ + { + "bbox": [ + 247, + 350, + 361, + 371 + ], + "type": "interline_equation", + "content": "\\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o _ {i} \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ | \\hat {A} _ {i} | \\right] \\right].", + "image_path": "0f49106ee4483579e10efda00fc1881b3a87c0a27aad840d46e59cb4e38f969f.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 104, + 377, + 504, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 377, + 504, + 411 + ], + "spans": [ + { + 
"bbox": [ + 104, + 377, + 504, + 411 + ], + "type": "text", + "content": "Thus, the expected absolute advantage serves as a proxy for the learning signal magnitude contributed by distribution " + }, + { + "bbox": [ + 104, + 377, + 504, + 411 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 377, + 504, + 411 + ], + "type": "text", + "content": ". The expected absolute advantage reflects how much training on distribution " + }, + { + "bbox": [ + 104, + 377, + 504, + 411 + ], + "type": "inline_equation", + "content": "d" + }, + { + "bbox": [ + 104, + 377, + 504, + 411 + ], + "type": "text", + "content": " can improve the model parameters, making it a suitable signal for curriculum scheduling." + } + ] + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 494, + 415, + 504, + 425 + ], + "blocks": [ + { + "bbox": [ + 494, + 415, + 504, + 425 + ], + "lines": [ + { + "bbox": [ + 494, + 415, + 504, + 425 + ], + "spans": [ + { + "bbox": [ + 494, + 415, + 504, + 425 + ], + "type": "image", + "image_path": "ed7c1f9d1fcdd8ad872637f61821fac0d86b9eb068d8a228df80f01f4ae8e37b.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 104, + 441, + 458, + 456 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 441, + 458, + 456 + ], + "spans": [ + { + "bbox": [ + 104, + 441, + 458, + 456 + ], + "type": "text", + "content": "B Theoretical Justification for UCB-Based Distribution Scheduling" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 466, + 504, + 500 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 466, + 504, + 500 + ], + "spans": [ + { + "bbox": [ + 104, + 466, + 504, + 500 + ], + "type": "text", + "content": "We provide a theoretical justification for using Upper Confidence Bound (UCB) as a strategy for scheduling training over data distributions in RL-based post-training. 
Our objective is to maximize the cumulative learnability gain over " + }, + { + "bbox": [ + 104, + 466, + 504, + 500 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 466, + 504, + 500 + ], + "type": "text", + "content": " training steps, defined as:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 170, + 504, + 438, + 536 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 170, + 504, + 438, + 536 + ], + "spans": [ + { + "bbox": [ + 170, + 504, + 438, + 536 + ], + "type": "interline_equation", + "content": "\\max _ {\\{d _ {t} \\} _ {t = 1} ^ {T}} \\sum_ {t = 1} ^ {T} L (d _ {t}), \\quad \\text {w h e r e} \\quad L (d) = \\mathbb {E} _ {x \\sim d} \\left[ \\mathbb {E} _ {o \\sim \\pi_ {\\theta} (\\cdot | x)} \\left[ | \\hat {A} (o) | \\right] \\right].", + "image_path": "9bfa221ac6381956061ca7ddf44381fc6c6d521915da4f96c930dfbeaff0ce55.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "spans": [ + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "text", + "content": "This setting can be viewed as a stochastic multi-armed bandit (MAB) problem, where each data distribution " + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "inline_equation", + "content": "d_{j} \\in \\mathcal{D}" + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "text", + "content": " corresponds to an arm with unknown reward " + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "inline_equation", + "content": "L(d_{j})" + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "text", + "content": ", interpreted as the expected absolute advantage from training on samples from " + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "inline_equation", + "content": "d_{j}" + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "text", + "content": ". At each training step " + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "text", + "content": ", the learner selects a distribution " + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "inline_equation", + "content": "d_{t}" + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "text", + "content": " and obtains an empirical reward " + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "inline_equation", + "content": "\\hat{L}(d_{t})" + }, + { + "bbox": [ + 104, + 546, + 504, + 602 + ], + "type": "text", + "content": " by averaging the absolute advantages observed in the batch." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 104, + 609, + 504, + 631 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 609, + 504, + 631 + ], + "spans": [ + { + "bbox": [ + 104, + 609, + 504, + 631 + ], + "type": "text", + "content": "We define the regret as the gap between the cumulative learnability gain of the best fixed distribution " + }, + { + "bbox": [ + 104, + 609, + 504, + 631 + ], + "type": "inline_equation", + "content": "d^{*} = \\arg \\max_{d}L(d)" + }, + { + "bbox": [ + 104, + 609, + 504, + 631 + ], + "type": "text", + "content": " and that of the learner's actual selections:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 228, + 635, + 381, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 635, + 381, + 669 + ], + "spans": [ + { + "bbox": [ + 228, + 635, + 381, + 669 + ], + "type": "interline_equation", + "content": "\\operatorname {R e g r e t} (T) = \\sum_ {t = 1} ^ {T} L \\left(d ^ {*}\\right) - \\sum_ {t = 1} ^ {T} L \\left(d _ {t}\\right).", + "image_path": "d8f2b5ddc5aeb603604f13e42282b57951931d142166de91e5f5fae103b11cdf.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 104, + 677, + 345, + 690 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 677, + 345, + 690 + ], + "spans": [ + { + "bbox": [ + 104, + 677, + 345, + 690 + ], + "type": "text", + "content": "To analyze this regret, we make the following assumptions:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "spans": [ + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "text", + "content": "1. For each distribution " + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "inline_equation", + "content": "d_{j}" + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "text", + "content": ", the per-output absolute advantages " + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "inline_equation", + "content": "|\\hat{A}(o)|" + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "inline_equation", + "content": "o \\sim \\pi_{\\theta}(\\cdot|x)" + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "text", + "content": ", are i.i.d. and bounded in " + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "inline_equation", + "content": "[0, C]" + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "text", + "content": " for some constant " + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "inline_equation", + "content": "C > 0" + }, + { + "bbox": [ + 129, + 699, + 504, + 723 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 127, + 72, + 504, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 72, + 504, + 95 + ], + "spans": [ + { + "bbox": [ + 127, + 72, + 504, + 95 + ], + "type": "text", + "content": "2. 
The true expected advantage " + }, + { + "bbox": [ + 127, + 72, + 504, + 95 + ], + "type": "inline_equation", + "content": "L(d_{j})" + }, + { + "bbox": [ + 127, + 72, + 504, + 95 + ], + "type": "text", + "content": " remains approximately stationary over a local training window, enabling meaningful online adaptation." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 104, + 106, + 504, + 140 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 106, + 504, + 140 + ], + "spans": [ + { + "bbox": [ + 104, + 106, + 504, + 140 + ], + "type": "text", + "content": "Note: In practice, we can clip or normalize " + }, + { + "bbox": [ + 104, + 106, + 504, + 140 + ], + "type": "inline_equation", + "content": "|\\hat{A}(o)|" + }, + { + "bbox": [ + 104, + 106, + 504, + 140 + ], + "type": "text", + "content": " to satisfy the boundedness condition. The introduction of the constant " + }, + { + "bbox": [ + 104, + 106, + 504, + 140 + ], + "type": "inline_equation", + "content": "C" + }, + { + "bbox": [ + 104, + 106, + 504, + 140 + ], + "type": "text", + "content": " only scales the regret by a constant factor and does not affect the asymptotic rate of convergence." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 105, + 144, + 346, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 144, + 346, + 156 + ], + "spans": [ + { + "bbox": [ + 105, + 144, + 346, + 156 + ], + "type": "text", + "content": "Under these assumptions, the following regret bound holds:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "spans": [ + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": "Theorem B.1. Let " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{d_1, \\ldots, d_N\\}" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": " be a set of data distributions with fixed expected rewards " + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "inline_equation", + "content": "L(d_j) \\in [0, C]" + }, + { + "bbox": [ + 104, + 158, + 504, + 192 + ], + "type": "text", + "content": ". Then, applying the UCB1 algorithm to the empirical reward observations yields the regret bound:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 159, + 198, + 451, + 236 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 159, + 198, + 451, + 236 + ], + "spans": [ + { + "bbox": [ + 159, + 198, + 451, + 236 + ], + "type": "interline_equation", + "content": "R e g r e t (T) \\leq O \\left(C \\cdot \\sum_ {j: \\Delta_ {j} > 0} \\frac {\\log T}{\\Delta_ {j}}\\right), \\quad w h e r e \\quad \\Delta_ {j} = L \\left(d ^ {*}\\right) - L \\left(d _ {j}\\right).", + "image_path": "1226dd8c5b516281dc668f8a76af77e66716029ddc4798fe57b30d5decfe1308.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "spans": [ + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": "Proof. 
This result is a direct application of the classical UCB1 regret bound [31], extended to the case where reward values lie in " + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "inline_equation", + "content": "[0, C]" + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": ". Let " + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "inline_equation", + "content": "d^{*} = \\arg \\max_{d} L(d)" + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": " be the optimal distribution, and let " + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "inline_equation", + "content": "\\Delta_{j} = L(d^{*}) - L(d_{j})" + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": " denote the suboptimality gap for each arm " + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "inline_equation", + "content": "d_{j}" + }, + { + "bbox": [ + 104, + 247, + 504, + 281 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 104, + 285, + 485, + 297 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 285, + 485, + 297 + ], + "spans": [ + { + "bbox": [ + 104, + 285, + 485, + 297 + ], + "type": "text", + "content": "At each time step " + }, + { + "bbox": [ + 104, + 285, + 485, + 297 + ], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 104, + 285, + 485, + 297 + ], + "type": "text", + "content": ", UCB1 selects the distribution " + }, + { + "bbox": [ + 104, + 285, + 485, + 297 + ], + "type": "inline_equation", + "content": "d_{j}" + }, + { + "bbox": [ + 104, + 285, + 485, + 297 + ], + "type": "text", + "content": " with the highest upper confidence bound:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 233, + 303, + 377, + 335 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 303, + 377, + 335 + ], + "spans": [ + { + "bbox": [ + 233, + 303, + 377, + 335 + ], + "type": "interline_equation", + "content": "\\mathbf {U C B} (d _ {j}) = \\hat {L} (d _ {j}) + \\sqrt {\\frac {2 C ^ {2} \\log t}{n _ {j}}},", + "image_path": "625199cc1fa3533c68203d2c91a06e76acecc9cb32fe48d4eb476ca018029b80.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "spans": [ + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "n_j" + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": " is the number of times distribution " + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "d_j" + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": " has been sampled so far, and " + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "inline_equation", + "content": "\\hat{L}(d_j)" + }, + { + "bbox": [ + 104, + 342, + 504, + 365 + ], + "type": "text", + "content": " is the empirical mean of observed rewards (mean absolute advantages)." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 104, + 369, + 506, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 369, + 506, + 415 + ], + "spans": [ + { + "bbox": [ + 104, + 369, + 506, + 415 + ], + "type": "text", + "content": "Under the assumptions that rewards are i.i.d. 
and bounded in " + }, + { + "bbox": [ + 104, + 369, + 506, + 415 + ], + "type": "inline_equation", + "content": "[0, C]" + }, + { + "bbox": [ + 104, + 369, + 506, + 415 + ], + "type": "text", + "content": ", the Hoeffding inequality guarantees that with high probability the empirical mean concentrates around the true mean " + }, + { + "bbox": [ + 104, + 369, + 506, + 415 + ], + "type": "inline_equation", + "content": "L(d_{j})" + }, + { + "bbox": [ + 104, + 369, + 506, + 415 + ], + "type": "text", + "content": ", and the UCB selection mechanism will only pick suboptimal arms a logarithmic number of times. Based on UCB1 regret bound [31], The cumulative regret is therefore bounded by:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 209, + 419, + 400, + 451 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 209, + 419, + 400, + 451 + ], + "spans": [ + { + "bbox": [ + 209, + 419, + 400, + 451 + ], + "type": "interline_equation", + "content": "\\operatorname {R e g r e t} (T) \\leq \\sum_ {j: \\Delta_ {j} > 0} \\left(\\frac {8 C ^ {2} \\log T}{\\Delta_ {j}} + O (\\Delta_ {j})\\right),", + "image_path": "cf05bc8e173440ec3b5833dd318f2a494d32097e8722b17d7a6a65efc8535d9a.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 104, + 456, + 301, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 456, + 301, + 468 + ], + "spans": [ + { + "bbox": [ + 104, + 456, + 301, + 468 + ], + "type": "text", + "content": "which simplifies to the stated asymptotic bound:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 226, + 473, + 383, + 512 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 473, + 383, + 512 + ], + "spans": [ + { + "bbox": [ + 226, + 473, + 383, + 512 + ], + "type": "interline_equation", + "content": "\\operatorname {R e g r e t} (T) = O \\left(C \\cdot \\sum_ {j: \\Delta_ {j} > 0} \\frac {\\log T}{\\Delta_ {j}}\\right).", + "image_path": "75809fff8a862bfb1cb12056d3e489ba26f74e510e9defb68d0e26568b6acb1d.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 104, + 539, + 504, + 585 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 539, + 504, + 585 + ], + "spans": [ + { + "bbox": [ + 104, + 539, + 504, + 585 + ], + "type": "text", + "content": "This result shows that our distribution-level scheduling strategy, when driven by UCB over empirical advantage rewards, is provably efficient. It dynamically concentrates training on distributions with high estimated learnability while ensuring sufficient exploration, with regret that scales logarithmically in " + }, + { + "bbox": [ + 104, + 539, + 504, + 585 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 104, + 539, + 504, + 585 + ], + "type": "text", + "content": " and linearly in " + }, + { + "bbox": [ + 104, + 539, + 504, + 585 + ], + "type": "inline_equation", + "content": "1 / \\Delta_j" + }, + { + "bbox": [ + 104, + 539, + 504, + 585 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 104, + 599, + 320, + 613 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 599, + 320, + 613 + ], + "spans": [ + { + "bbox": [ + 104, + 599, + 320, + 613 + ], + "type": "text", + "content": "C Comparison to Heuristic Curriculum" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 104, + 624, + 506, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 624, + 506, + 723 + ], + "spans": [ + { + "bbox": [ + 104, + 624, + 506, + 723 + ], + "type": "text", + "content": "Heuristic curricula, which manually specify a fixed training schedule over data distributions—e.g., training on Distribution A for N steps before switching to Distribution B—have been explored in prior work [11, 10], particularly in environments where task difficulty or domain progression is well understood. However, such approaches have several limitations that make them less suitable for our setting. First, effective heuristic scheduling requires strong prior knowledge about the relative difficulty and learnability of each distribution. In our setting, which involves diverse domains such as logic reasoning, mathematics, and programming, such prior knowledge is often unavailable or misleading. For example, a distribution may appear \"easier\" but provide low learning signal, or seem \"harder\" but actually yield high gradient utility. This makes it extremely difficult to construct" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 311, + 750 + ], + "type": "text", + "content": "14" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 118, + 76, + 187, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 118, + 76, + 187, + 85 + ], + "spans": [ + { + "bbox": [ + 118, + 76, + 187, + 85 + ], + "type": "text", + "content": "Example of Prompt" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 116, + 88, + 501, + 135 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 88, + 501, + 135 + ], + "spans": [ + { + "bbox": [ + 116, + 88, + 501, + 135 + ], + "type": "text", + "content": "You are a helpful assistant. The assistant first thinks about the reasoning process in the mind and then provides the user with the answer. The reasoning process and answer are enclosed within and tags, respectively, i.e., reasoning process here answer here . Now the user asks you to solve a reasoning problem. After thinking, when you finally reach a conclusion, clearly state the identity of each character within tags. [Problem]" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 233, + 144, + 376, + 156 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 144, + 376, + 156 + ], + "spans": [ + { + "bbox": [ + 233, + 144, + 376, + 156 + ], + "type": "text", + "content": "Figure 3: Example of prompt used." + } + ] + } + ], + "index": 2, + "type": "text" + }, + { + "bbox": [ + 104, + 177, + 504, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 177, + 504, + 255 + ], + "spans": [ + { + "bbox": [ + 104, + 177, + 504, + 255 + ], + "type": "text", + "content": "robust, generalizable heuristics across tasks. 
Second, heuristic curricula are static and cannot adapt to the evolving needs of the model during training. In contrast, DUMP dynamically adjusts sampling priorities based on actual model performance—measured via policy advantages—allowing it to focus on the most beneficial distributions at each stage of learning. Finally, the lack of standardized or widely accepted heuristic curricula for our task suite makes it hard to conduct fair and meaningful comparisons. Instead, we benchmark DUMP against uniform sampling and adaptive baselines, which are more reflective of current best practices in large-scale post-training pipelines." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 105, + 270, + 188, + 282 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 105, + 270, + 188, + 282 + ], + "spans": [ + { + "bbox": [ + 105, + 270, + 188, + 282 + ], + "type": "text", + "content": "D Limitations" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 104, + 294, + 504, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 104, + 294, + 504, + 350 + ], + "spans": [ + { + "bbox": [ + 104, + 294, + 504, + 350 + ], + "type": "text", + "content": "First, while the core idea of distribution-level curriculum learning is broadly applicable, we evaluate DUMP only in the context of large language models (LLMs) and do not extend the experiments to multimodal large language models (MLLMs) due to computational constraints. Second, our experiments are limited to 7B-scale models. Scaling our method to larger models remains an important direction for future work." + } + ] + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 300, + 741, + 310, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 6 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_content_list.json b/data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..09c6af1c701f9e7fdf7194a9f80d2a0968343029 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_content_list.json @@ -0,0 +1,3755 @@ +[ + { + "type": "text", + "text": "Can LLM feedback enhance review quality? 
A randomized study of 20K reviews at ICLR 2025", + "text_level": 1, + "bbox": [ + 114, + 135, + 883, + 186 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Nitya Thakkar1, Mert Yuksekgonul1, Jake Silberg1, Animesh Garg2, Nanyun Peng3, Fei Sha4, Rose Yu5, Carl Vondrick6, James Zou1", + "bbox": [ + 210, + 204, + 782, + 241 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{1}$ Stanford University", + "bbox": [ + 424, + 242, + 573, + 258 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "2Georgia Institute of Technology", + "bbox": [ + 380, + 260, + 616, + 276 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "3University of California, Los Angeles", + "bbox": [ + 362, + 277, + 633, + 292 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "4Google Research", + "bbox": [ + 434, + 295, + 562, + 310 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "5University of California, San Diego", + "bbox": [ + 367, + 311, + 627, + 328 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "$^{6}$ Columbia University", + "bbox": [ + 419, + 330, + 576, + 345 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 462, + 377, + 532, + 391 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Peer review at AI conferences is stressed by rapidly rising submission volumes, leading to deteriorating review quality and increased author dissatisfaction. To address these issues, we developed Review Feedback Agent, a system leveraging multiple large language models (LLMs) to improve review clarity and actionability by providing automated feedback on vague comments, content misunderstandings, and unprofessional remarks to reviewers. Implemented at ICLR 2025 as a large randomized control study, our system provided optional feedback to more than 20,000 randomly selected reviews. To ensure high-quality feedback for reviewers at this scale, we also developed a suite of automated reliability tests powered by LLMs that acted as guardrails to ensure feedback quality, with feedback only being sent to reviewers if it passed all the tests. The results show that $27\\%$ of reviewers who received feedback updated their reviews, and over 12,000 feedback suggestions from the agent were incorporated by those reviewers. This suggests that many reviewers found the AI-generated feedback sufficiently helpful to merit updating their reviews. Incorporating AI feedback led to significantly longer reviews (an average increase of 80 words among those who updated after receiving feedback) and more informative reviews, as evaluated by blinded researchers. Moreover, reviewers who were selected to receive AI feedback were also more engaged during paper rebuttals, as seen in longer author-reviewer discussions. This work demonstrates that carefully designed LLM-generated review feedback can enhance peer review quality by making reviews more specific and actionable while increasing engagement between reviewers and authors. 
The Review Feedback Agent is publicly available at https://github.com/zou-group/review_feedback_agent.", + "bbox": [ + 151, + 396, + 844, + 647 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1 Introduction", + "text_level": 1, + "bbox": [ + 112, + 669, + 303, + 686 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Scientific peer review is a critical step before publication, where domain experts evaluate the research to ensure thoroughness and scientific integrity, prevent false claims, and provide a strong foundation for future work [1, 2]. High-quality reviews are essential for authors to improve their work, address key limitations, and advance scientific progress. However, in a survey of 11,800 researchers worldwide, while $98\\%$ view peer review as essential to maintaining the quality and integrity of academic communication, only $55.4\\%$ expressed satisfaction with the quality of reviews they receive [3]. This dissatisfaction has grown as obtaining constructive and high-quality peer reviews has become more challenging due to the increase in the number of paper submissions, especially in fast-moving areas like Artificial Intelligence (AI) [4, 5]. For example, the International Conference on Learning Representations (ICLR) experienced year-over-year submission increases of $47\\%$ in 2024 and $61\\%$ in 2025 [6]. To maintain a rigorous and meaningful peer review process amid this growth, it is crucial to address the growing burden on reviewers and the subsequent deterioration in review quality.", + "bbox": [ + 109, + 700, + 883, + 880 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Authors at AI conferences increasingly report receiving short, vague reviews with criticisms like 'not novel' or 'not state-of-the-art (SOTA)' [7]. At the 2023 Association for Computational Linguistics meeting,", + "bbox": [ + 111, + 881, + 883, + 912 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09737v1 [cs.AI] 13 Apr 2025", + "bbox": [ + 22, + 270, + 60, + 705 + ], + "page_idx": 0 + }, + { + "type": "page_number", + "text": "1", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "authors flagged $12.9\\%$ of reviews for poor quality, primarily due to these vague, surface-level criticisms [8]. The peer review system is further strained by reviewers being assigned papers outside their expertise [9] and the same papers being reviewed multiple times due to high rejection rates [1]. Additionally, the 2014 NeurIPS Experiment highlighted inconsistencies in the peer review process by showing that approximately $25\\%$ of paper acceptance decisions differed between two independent review committees [10]. These issues not only frustrate authors but potentially allow weaker research to be accepted while strong work is rejected, ultimately preventing papers from reaching their full potential due to the decline of meaningful dialogue between reviewers and authors.", + "bbox": [ + 109, + 90, + 883, + 210 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Large language models (LLMs) [11] have the potential to enhance the quality and usefulness of peer reviews for authors [12]. Recent studies demonstrated that LLMs can serve as effective critics, generating detailed and constructive feedback [13, 14]. Furthermore, LLMs have already shown high utilization in the peer review process. 
Reviewers are increasingly turning to LLMs to assist in drafting their reviews, with an estimated $10.6\\%$ of reviewers at ICLR 2024 using LLMs for this purpose [15, 16].", + "bbox": [ + 109, + 210, + 883, + 287 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "To explore how LLMs can improve review quality at scale, we introduce Review Feedback Agent, a multi-LLM system designed to enhance the clarity and actionability of reviews by providing feedback to reviewers. Piloted at ICLR 2025 as a large randomized control study, our agent provided feedback to over 20,000 randomly selected reviews (representing half of all ICLR 2025 reviews) over four weeks from October 15 to November 12, 2024. The generated feedback primarily focused on minimizing instances of vague and unjustified comments while also addressing content misinterpretations and unprofessional remarks. Using Claude Sonnet 3.5 as the backbone [11], we created a system of five LLMs that collaborated to generate high-quality feedback. To enhance the system's reliability against potential errors or failures in instruction-following [17, 18], we developed a set of reliability tests to evaluate specific qualities of the generated feedback; the feedback was only posted if it passed all of these tests.", + "bbox": [ + 109, + 287, + 883, + 436 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Summary of main findings. Of the randomly selected ICLR reviews that received AI feedback, $26.6\\%$ of reviewers updated their reviews, altogether incorporating 12,222 suggestions from the feedback agent into the reviews. Blinded ML researchers labeled these revised reviews as more informative and clearer than their initial versions. Reviewers who updated after receiving feedback increased the length of reviews by an average of 80 words. Furthermore, AI feedback led to more engaged discussions during the rebuttal period, as seen through longer author and reviewer responses. We also observed that reviewers who received feedback were more likely to change their scores after the rebuttal period, which was consistent with a more engaged rebuttal process.", + "bbox": [ + 109, + 438, + 883, + 558 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In this study, we present the first large-scale deployment for using LLMs to assist peer review. By making reviews more actionable and informative, we aim to enhance the peer review experience and promote a more constructive scientific process.", + "bbox": [ + 109, + 559, + 883, + 604 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2 Methods", + "text_level": 1, + "bbox": [ + 112, + 627, + 261, + 645 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "In what follows, we first describe the review feedback experiment, including its goals and our technical setup with OpenReview. Next, we outline the architecture of our Review Feedback Agent and explain how the system was designed to meet our goals while ensuring a high level of reliability. In total, the agent automatically provided feedback to over 20,000 reviews at ICLR 2025.", + "bbox": [ + 109, + 657, + 883, + 719 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2.1 ICLR 2025 review feedback experiment", + "text_level": 1, + "bbox": [ + 109, + 737, + 549, + 756 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our pilot study was conducted in collaboration with ICLR 2025 and OpenReview. 
As one of the world's fastest-growing AI conferences, ICLR receives thousands of paper submissions yearly; in 2025, ICLR received 11,603 submissions. Each submission is assigned an average of 4 reviewers, and all reviews are standardized to include the same sections: summary, strengths, weaknesses, and questions. Furthermore, reviewers provide scores on a scale of 1 (low) to 10 (high), rating the paper according to the following categories: soundness, presentation, contribution, rating, and confidence.", + "bbox": [ + 109, + 762, + 883, + 852 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Goal: Our goal was to enhance review quality and, in particular, reduce low-information content reviews. Toward this goal, we identified three categories of common issues in reviews that we hoped to improve by providing LLM-generated feedback. The common issues are: 1) vague or generic critiques in reviews (the", + "bbox": [ + 109, + 852, + 883, + 898 + ], + "page_idx": 1 + }, + { + "type": "page_number", + "text": "2", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/cb3cb6790063b0f0ae0f127965da667e45b0ac6b2a5535a4245588d35aa6d9d9.jpg", + "image_caption": [ + "A" + ], + "image_footnote": [], + "bbox": [ + 127, + 130, + 444, + 359 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/53730c84253867313b52b006f4df0105e6064c8df741a4e4dceee4f0f040a1ce.jpg", + "image_caption": [ + "B" + ], + "image_footnote": [], + "bbox": [ + 467, + 119, + 870, + 362 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/92b4aa5a4616a8525289426598d6d2f1ec4b3b5673fe1fdf950f45f5a7d0227e.jpg", + "image_caption": [ + "C", + "Figure 1: (A) Randomized controlled study setup. Before the start of the review period, we randomly assigned all submissions to one of three groups to determine how many of its reviews received feedback: none, half, or all. When a review selected to receive feedback was submitted, the agent generated and posted feedback after 1 hour. Reviewers could update their review, optionally, based on the feedback until the end of the review period, which ran from October 14 to November 12, 2024. (B) Feedback categories. Our system is designed to address three main types of review comments. Here, we provide examples of comments that would receive feedback from our agent, as well as examples of the generated feedback. (C) Review Feedback Agent. Our system consists of five LLMs (Actors, Aggregator, Critic, and Formmatter). Two parallel Actors generate the initial feedback, then pass it to the Aggregator, the Critic, and finally the Formmatter. Finally, the feedback is passed through the reliability tests; upon successfully passing, the feedback is posted on a review. We provide examples of comments and feedback given to those comments by our system." + ], + "image_footnote": [], + "bbox": [ + 138, + 377, + 875, + 670 + ], + "page_idx": 2 + }, + { + "type": "page_number", + "text": "3", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "feedback asks the reviewers to be more specific and actionable); 2) questions or confusions that could be addressed by overlooked parts of the paper (the feedback highlights relevant sections); and 3) unprofessional statements in the review (the feedback asks the reviewer to rephrase). 
For each comment in a review, the Review Feedback Agent determined if it fell into any of these problematic categories and, if so, provided feedback on that specific review comment.", + "bbox": [ + 109, + 90, + 883, + 166 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Experimental setup: We set up this experiment as a Randomized Control Trial (RCT) to enable us to make causal inferences about how receiving feedback influences the peer review process. Before the beginning of the review period, we randomly split papers into one of three equal groups (see Figure 1A):", + "bbox": [ + 109, + 167, + 883, + 212 + ], + "page_idx": 3 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. No reviews for this paper will receive feedback,", + "2. Half of the reviews for this paper will be randomly selected to receive feedback,", + "3. All reviews for this paper will receive feedback." + ], + "bbox": [ + 130, + 219, + 723, + 282 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "For reviews randomly assigned to receive feedback, the Review Feedback Agent, wrapped in an API, was automatically triggered when a reviewer first submitted their review on OpenReview. We delayed the feedback generation by one hour after a review was initially submitted to allow reviewers time to make any small edits (e.g., typo corrections). See Figure 1A for an example timeline. The agent posted feedback to reviews through the OpenReview interface by replying to reviews with the feedback wrapped in a comment. See Figure 2 for an example of what feedback looked like on the OpenReview website.", + "bbox": [ + 109, + 289, + 883, + 380 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/fe3c200bfa72cd0d795f0d8bd31f374bae1fefefefb74b32eebe2adb5d84a559.jpg", + "image_caption": [ + "Figure 2: OpenReview interface. Here, we provide an example of feedback posted to a review on the OpenReview website (with consent from the reviewer). Feedback is only visible to the reviewer and the ICLR program chairs and was posted roughly one hour after the initial review was submitted." + ], + "image_footnote": [], + "bbox": [ + 235, + 404, + 763, + 795 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "The agent only provided feedback on the initial review, and there was no subsequent interaction between the reviewer and the feedback system after that time point. The feedback is only visible to the reviewer", + "bbox": [ + 109, + 881, + 883, + 912 + ], + "page_idx": 3 + }, + { + "type": "page_number", + "text": "4", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "and the ICLR program chairs; it was not shared with other reviewers, authors, or area chairs and was not a factor in the acceptance decisions. Reviewers were informed that the feedback was generated by a LLM and could choose to ignore the feedback or revise their review in response, as the system did not make any direct changes. Finally, we did not access or store any identifiable information about authors or reviewers. This study was reviewed by IRB and deemed low risk.", + "bbox": [ + 109, + 90, + 883, + 165 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Statistics: Around $50\\%$ of reviews were randomly selected to receive feedback. Of the 44,831 reviews submitted on 11,553 unique papers (we excluded desk-rejected submissions), we posted feedback to 18,946 reviews $(42.3\\%)$ over 4 weeks from October 15 to November 12, 2024 (see Figure 2A). 
Less than $8\\%$ of the selected reviews did not receive feedback for one of two reasons: 2,692 reviews were originally well-written and did not need feedback, while 829 reviews had feedback that failed the reliability tests. Each review took roughly one minute to run through our entire pipeline and cost around 50 cents. On average, each review that received feedback was given 3-4 feedback comments, with a minimum of 1 and a maximum of 17.", + "bbox": [ + 109, + 167, + 883, + 272 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "2.2 Review Feedback Agent", + "text_level": 1, + "bbox": [ + 112, + 290, + 398, + 306 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "The Review Feedback Agent aimed to provide feedback that helped reviewers make their comments more specific, constructive, and actionable for the authors.", + "bbox": [ + 109, + 316, + 883, + 344 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Feedback categories: The Review Feedback Agent provided suggestions on three potential categories of issues in reviews. We curated these categories by examining reviewer guidelines from several AI conferences [19, 20, 21, 22] and evaluating previously identified patterns of \"lazy reviewer thinking\" [7]. We also took inspiration from the ARR guidelines, where 16 common reviewer heuristics are outlined [23]. Importantly, the agent was not designed to suggest new ideas to add to the review; rather, it only focused on revising the existing ideas and preventing lower-quality reviews. The target feedback areas that we ultimately focused on were:", + "bbox": [ + 109, + 345, + 883, + 450 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Improving specificity: Encouraging reviewers to rephrase vague review comments, making them more specific, actionable, and justified for the authors.", + "2. Addressing misunderstandings: Highlighting sections of the paper that may already address some of the reviewer's questions or confusion.", + "3. Reducing unprofessional remarks: Identifying and addressing unprofessional or inappropriate remarks in the review. A 2019 study of 1,106 researchers found that $58\\%$ had received an unprofessional review, highlighting its prevalence [24]." + ], + "bbox": [ + 130, + 464, + 879, + 590 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "See Figure 1B for examples of real reviewer comments (from ICLR 2024 reviews and public journal reviews) in each category that would receive feedback and examples of feedback that would be given.", + "bbox": [ + 111, + 602, + 879, + 632 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Preprocessing: The agent was provided with the paper PDF's text (extracted using pypdf's PDFReader [25]) and the review text as input. We extracted the summary, strengths, weaknesses, and questions sections from the review. We did not provide the agent with any of the scores the reviewer initially gave the paper.", + "bbox": [ + 111, + 633, + 879, + 676 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Architecture: The agent generated a list of pairs, with each pair consisting of a review comment that fit into one of the problematic categories above and the corresponding feedback provided for that comment. The agent was composed of a pipeline of five LLMs (see Algorithm 1, Figure 1C). 
We used the Claude Sonnet 3.5 (June 20, 2024) model [11] as the backbone; we picked the backbone model by generating feedback with the same prompt using GPT-4o, Gemini 1.5 Flash, and Claude Sonnet 3.5 and then conducting a blind preference evaluation. Additionally, through testing, we found that one LLM was insufficient to generate high-quality feedback and format it correctly, thus, we instantiated the multi-call pipeline. First, we defined two parallel actor LLMs to generate the initial set of feedback based on the previously defined target areas. The actors were provided with the initial review $(R)$ and paper text $(P)$ as inputs. We used two separate actors to optimize for feedback diversity. Then, we passed the two lists of feedback, $F_{1}$ and $F_{2}$ , to an aggregator LLM, which merged the lists into one set of feedback, $F_{combined}$ . Next, we passed this candidate list to a critic LLM responsible for ensuring the feedback was accurate and clear. Importantly, the critic also removed any feedback that was too superficial or nitpicky, defined through various in-context examples (see Appendix A for the examples), as we did not want to overwhelm or annoy reviewers. Finally, aformatter LLM was provided with this final list, $F_{filtered}$ , and formatted it into pairs:", + "bbox": [ + 111, + 679, + 883, + 904 + ], + "page_idx": 4 + }, + { + "type": "page_number", + "text": "5", + "bbox": [ + 493, + 935, + 503, + 946 + ], + "page_idx": 4 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- **Reviewer comment:** a comment", + "- **Feedback to the reviewer:** feedback to the comment" + ], + "bbox": [ + 137, + 90, + 555, + 130 + ], + "page_idx": 5 + }, + { + "type": "code", + "sub_type": "algorithm", + "code_caption": [ + "Algorithm 1 Review Feedback Agent" + ], + "code_body": "1: Input: Paper text $P$ , Review $R$ , max attempts $T = 2$ \n2: for $t = 1$ to $T$ do \n3: $F_{1} \\gets \\mathrm{Actor}_{1}(P, R)$ \n4: $F_{2} \\gets \\mathrm{Actor}_{2}(P, R)$ \n5: $F_{combined} \\gets \\mathrm{Aggregator}(F_{1}, F_{2})$ \n6: $F_{filtered} \\gets \\mathrm{Critic}(F_{combined})$ \n7: $F_{final} \\gets \\mathrm{Formatter}(F_{filtered})$ \n8: if PassReliabilityTests(Ffinal) then return $F_{final}$ \n9: end if \n10: end for \n11: return error", + "bbox": [ + 116, + 169, + 535, + 335 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "See Appendix A for the exact prompts used. To refine this system, we constructed a test set of 50 ICLR 2024 reviews we perceived to be of low quality in one or more of our target areas (i.e., they made vague comments, asked questions that were present in the paper already, and/or made unprofessional remarks). We iteratively ran our agent on this test set, examined the generated feedback, and refined the prompts to optimize the results. This procedure ultimately led to prompts that produced high-quality feedback for all 50 reviews in the test set.", + "bbox": [ + 109, + 357, + 883, + 446 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Reliability testing: Inspired by [26], we also developed a suite of reliability tests designed to act as guardrails, ensuring the quality of our generated feedback. Reliability tests evaluate specific attributes of a model's output. The four reliability tests we developed ensured the feedback provided constructive suggestions, addressed the reviewer, did not simply restate what the reviewer wrote, and was formatted correctly. 
We provide the exact reliability tests we used and examples of feedback that would fail the reliability tests in Appendix B. We developed up to five test cases for each reliability test and refined the reliability test prompts until we passed all the test cases. To refine our Review Feedback Agent's pipeline and prompts, we passed our test set reviews through the validated reliability tests until we achieved a $100\\%$ pass rate.", + "bbox": [ + 109, + 446, + 883, + 582 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Feedback was only posted to a review if it passed all our reliability tests; if it failed, we re-ran the entire pipeline a second time ( $T = 2$ ) to generate new feedback. Upon a second fail, we returned an error and did not post the feedback. Over $96\\%$ of generated feedback for ICLR 2025 reviews passed all reliability tests.", + "bbox": [ + 109, + 583, + 883, + 630 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3 Results", + "text_level": 1, + "bbox": [ + 112, + 651, + 243, + 669 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "3.1 Impact of feedback on review updates and reviewer engagement", + "text_level": 1, + "bbox": [ + 109, + 683, + 785, + 702 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "First, we aimed to objectively measure how many reviewers updated their reviews after receiving feedback compared to those who did not receive feedback. This enabled us to assess how the feedback may have been associated with changes in various components of their review, such as length and scores.", + "bbox": [ + 109, + 708, + 883, + 753 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "We conducted this ICLR experiment as a randomized controlled study by randomly splitting all reviews into one of two groups: not selected to receive feedback (control group) or selected to receive feedback (feedback group) - see Section 2 for more details. Note that the group selected to receive feedback includes the $7.9\\%$ of reviews that were selected but did not actually receive feedback, mostly because AI deemed feedback not necessary there. This intent-to-treat definition of the feedback group enables us to conduct causal analysis but could dilute the actual effect of the feedback.", + "bbox": [ + 109, + 753, + 883, + 843 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Of all reviews in the feedback group, we further defined reviews that successfully received feedback as either being not updated or updated. A review is not updated if a reviewer did not edit their review after receiving feedback or if the edit distance between the initial and modified review was less than 5; this edit distance filtering accounted for minor updates such as fixing typos or modifying scores. Conversely, a review", + "bbox": [ + 109, + 844, + 883, + 905 + ], + "page_idx": 5 + }, + { + "type": "page_number", + "text": "6", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 5 + }, + { + "type": "image", + "img_path": "images/c390e294f465635e741d8f40fc61356f4edd26a8fd74e07132339f3a99ecf6e9.jpg", + "image_caption": [ + "A", + "Figure 3: (A) Feedback statistics. Among all ICLR 2025 reviews, 22,467 were randomly selected to receive feedback (feedback group), and 22,364 were randomly selected not to receive feedback (control group). Of those selected to receive feedback, 18,946 $(42.3\\%)$ successfully received feedback, with $26.6\\%$ of those reviewers updating their reviews. (B) Update rates. 
(Top) Most reviews were submitted 2-3 days before the review deadline (November 4, 2024). (Bottom) Reviewers were more likely to update their review if they submitted it early relative to the deadline. Reviewers who received feedback were much more likely to update their reviews than those in the control group, with a difference of approximately 17 percentage points. (C) Average change in review length (measured as number of words). Review length is measured only for the following sections: summary, strengths, weaknesses, and questions. The difference in review length between the control and feedback groups is statistically significant $(^{**}\\mathrm{p} \\leq 0.01)$ , with being selected to receive feedback leading to an average increase of 14 words more (a $200\\%$ increase) in review length compared to the control group. The difference is more pronounced between the not-updated and updated groups $(^{***}\\mathrm{p} \\leq 0.001)$ ." + ], + "image_footnote": [], + "bbox": [ + 184, + 176, + 460, + 526 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e626483d5b51e3bf27cb3db70cce10e0b7472c4c1220b887075883a755b16eb6.jpg", + "image_caption": [ + "B" + ], + "image_footnote": [], + "bbox": [ + 504, + 180, + 836, + 522 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/64415fd85d0b933fa308e605b768e68b5bb8236d64a7550c074a78efa794cf12.jpg", + "table_caption": [ + "C" + ], + "table_footnote": [], + "table_body": "
ControlFeedbackNot updatedUpdated
Average change in length7.021.0** (Δ + 200%)2.180.3***
", + "bbox": [ + 171, + 573, + 823, + 619 + ], + "page_idx": 6 + }, + { + "type": "page_number", + "text": "7", + "bbox": [ + 493, + 936, + 503, + 946 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "is updated if a reviewer did edit their review after receiving feedback and the edit distance between the initial and modified review was greater than 5.", + "bbox": [ + 111, + 90, + 883, + 119 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Of the 18,946 reviews that successfully received feedback, 5,031 (26.6%) reviews were updated (Figure 3A). Out of the 22,364 reviews in the control group, only 2,103 (9.4%) were updated; here, we define updated for the control group as a reviewer updating at least one hour after posting (the time it takes for the feedback group to receive feedback) with an edit distance greater than 5. With an update rate difference of roughly 17 percentage points (Figure 3B), we can see that reviews that received feedback were much more likely to be updated than those that did not.", + "bbox": [ + 111, + 121, + 883, + 210 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "In Figure 3B, we also see that reviewers who submitted early relative to the deadline (November 4, 2024) were more likely to update their review than those who submitted close to or after the deadline. This suggests that more organized reviewers, who may already be more engaged in the review process, were more likely to revise their reviews in response to feedback. While this will influence our analysis comparing the not updated and updated groups, we can be confident that the underlying distribution of the control and feedback groups is similar and not biased by factors such as reviewer organization because we conducted this as an RCT. Randomization helps mitigate such biases, making it possible to assess the causal impact of the feedback on the peer review process.", + "bbox": [ + 111, + 212, + 883, + 332 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Finally, we analyzed the change in review length (number of words in the summary, strengths, weaknesses, and questions sections) among the groups (Figure 3C). We compared the initial review length and the modified review length; we refer to modified reviews as the review at the end of the four-week review period before the rebuttal period began (only these modified reviews are made public to authors). We saw that review length, on average, increased across all groups. First, we observed that being selected to receive feedback caused the average review length to increase by about 14 words more than reviews that were not selected to receive feedback. Note that this effect size is deflated due to the substantial number of reviewers who received feedback but did not update their review, as well as the $7.9\\%$ of reviews that were selected to receive feedback but did not actually get it. We also see that updating the review after receiving feedback is associated with a statistically significant increase in review length (80 words) compared to not updating the review (2 words). We can infer that reviewers who updated their reviews were editing them more consistently to incorporate more detail and nuance, explaining this large increase in length. 
In addition to feedback causing an increase in review length, we also found that a significantly higher percentage of reviewers who received feedback edited at least one of their scores (soundness, presentation, contribution, rating, and confidence) during the review period, with $8.1\\%$ of them making edits compared to $7.5\\%$ among the control group $(p \\leq 0.05)$ . In Appendix C, we observe no significant difference in the average score changes between the feedback and control groups.", + "bbox": [ + 114, + 333, + 883, + 590 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "3.2 Measuring how much feedback reviewers incorporate", + "text_level": 1, + "bbox": [ + 111, + 608, + 679, + 625 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Of the reviewers that updated their review, we wanted to measure what proportion of them incorporated one or more pieces of feedback they were provided. This analysis helped us estimate how many reviewers found the feedback useful.", + "bbox": [ + 111, + 633, + 883, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We counted a piece of feedback as incorporated if the reviewer clearly integrated some part of the feedback into their modified review. To systematically carry out this analysis, we developed an LLM-based pipeline to run on all updated reviews (see Supplementary Figure S2A). We used the Claude Sonnet 3.5 model to evaluate whether each feedback item received by a reviewer was incorporated into their modified review. See Appendix D for our approach to validating this pipeline. Of the 5,031 reviews that reviewers updated, encompassing 18,322 total feedback items, $89\\%$ of reviewers incorporated at least one piece of feedback. This represents $23.6\\%$ of all reviewers who received feedback (Figure 4A). In total, we estimate that 12,222 feedback items were incorporated into revised reviews. We also examined the number of feedback items reviewers who updated their reviews received compared to how many feedback items they incorporated (Figure 4B). We see that when reviewers receive fewer feedback items, they are more likely to incorporate more (or even all) of the items. Overall, the average reviewer who updated their review incorporated $69.3\\%$ of the feedback they received; in other words, given 3 pieces of feedback, the average reviewer who updated their review incorporated 2 of them.", + "bbox": [ + 114, + 678, + 883, + 873 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Below are a few examples of realincorporations reviewers made based on their feedback. We provide the initial review comment they posted, the feedback they received, and then their modified comment.", + "bbox": [ + 111, + 875, + 883, + 905 + ], + "page_idx": 7 + }, + { + "type": "page_number", + "text": "8", + "bbox": [ + 493, + 936, + 504, + 946 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/39d45c4bf6501e803d5caa9078adf127fb44c19ce3e84bc278beda887ea50cc2.jpg", + "image_caption": [ + "A" + ], + "image_footnote": [], + "bbox": [ + 153, + 147, + 439, + 309 + ], + "page_idx": 8 + }, + { + "type": "image", + "img_path": "images/574ace002b44edf3e53c3c7f3c729f721aaf7f0ee5bad955cd6216180e7b5ca4.jpg", + "image_caption": [ + "B", + "Of 12,222 total feedback items incorporated:", + "Figure 4: (A) Overall incorporation statistics. Through our LLM-based incorporation analysis, we estimate that $23.6\\%$ of reviewers who were given feedback incorporated at least one feedback item they were given. 
This means that $89\\%$ of reviewers who updated their review after receiving feedback incorporated at least one item. (B) Feedback incorporation trends. Here, we illustrate the relationship between the number of feedback items reviewers who updated their review received and how many of those items they incorporated. In total, reviewers incorporated 12,222 feedback items. Notably, reviewers were more likely to incorporate feedback when given fewer items." + ], + "image_footnote": [], + "bbox": [ + 464, + 138, + 826, + 344 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Incorporation example: Feedback that encourages specificity", + "text_level": 1, + "bbox": [ + 138, + 500, + 575, + 515 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Original comment: Theorems 1, 2, 3 require the notion of interpolation, which is considered a strong assumption to have.", + "bbox": [ + 135, + 526, + 857, + 556 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Feedback: To make this critique more actionable, it would be helpful to elaborate on why the interpolation assumption is problematic in this context. Are there particular types of federated learning problems where this assumption is unlikely to hold? Providing concrete examples or suggestions for how the authors could relax or justify this assumption would give them clearer guidance on addressing this limitation.", + "bbox": [ + 135, + 569, + 857, + 643 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Revised comment: Theorems 1, 2, 3 require the notion of interpolation. Even though an explanation of regimes that satisfy this condition is provided, considering that there are previous works [1], [2] that extend beyond that setting, this assumption seems to be an avenue for future work in this field. More specifically, the initial FedProx algorithm [1] is analyzed in the general non-interpolated setting. In addition, the follow-up work regarding the FedExProx algorithm [2] considers in the main paper the interpolated regime. However, the authors provide additionally an illustration of the algorithm's behaviour in the non-interpolated setting (see Appendix F.3 in [2]). In that sense, it would be useful to provide some additional details on the behaviour of the algorithm in the non-interpolated setting or to comment on the main challenges in extending the current proof technique beyond the interpolation framework, offering in that way a more complete picture and direction for future research.", + "bbox": [ + 135, + 657, + 857, + 809 + ], + "page_idx": 8 + }, + { + "type": "page_number", + "text": "9", + "bbox": [ + 493, + 935, + 504, + 946 + ], + "page_idx": 8 + }, + { + "type": "text", + "text": "Incorporation example: Feedback that identifies content oversight", + "text_level": 1, + "bbox": [ + 138, + 92, + 611, + 107 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Original reviewer comment: How is the performance on Llama?", + "bbox": [ + 138, + 119, + 629, + 133 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Feedback to the reviewer: The paper appears to provide results on Llama 2-7B in Table 3. Does this address your question? 
If not, could you clarify what specific aspects of performance on Llama you're interested in that aren't covered by the current results?", + "bbox": [ + 137, + 147, + 857, + 191 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Modified reviewer comment: How is the performance on Llama3 with the alpaca dataset?", + "bbox": [ + 138, + 205, + 815, + 220 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Incorporation example: Feedback that makes review more actionable", + "text_level": 1, + "bbox": [ + 138, + 242, + 635, + 257 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Original reviewer comment: Limited application focus on other visual tasks (e.g., object detection, segmentation) beyond classification.", + "bbox": [ + 138, + 268, + 856, + 300 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Feedback to the reviewer: To make this comment more actionable, consider asking the authors to discuss potential extensions or challenges in applying their approach to other visual tasks. For example: 'How might the visual memory approach be adapted for tasks like object detection or segmentation? What unique challenges would arise in those domains?'", + "bbox": [ + 137, + 311, + 857, + 372 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Modified reviewer comment: Limited application focus on other visual tasks (e.g., object detection, segmentation) beyond classification. How might the visual memory approach be adapted for tasks like object detection or segmentation? What unique challenges would arise in those domains?", + "bbox": [ + 137, + 385, + 857, + 431 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Finally, we wanted to assess whether these incorporated reviews were clearer, more specific, and more actionable for authors. To conduct this analysis, we asked two human AI researchers to conduct a blind preference evaluation between the initial and modified pre-rebuttal reviews. Specifically, we focused on reviews in the updated group that received 3-4 feedback items (the average number given) where the proportion of incorporated feedback exceeded 0.60. This threshold was chosen because the average incorporation rate was $67\\%$ , and we aimed to assess whether an average updated review with incorporated feedback was perceived as an improvement. Human annotators preferred modified reviews $89\\%$ of the time (out of 100 examples), indicating that reviewers who incorporated feedback consistently produced higher-quality reviews.", + "bbox": [ + 109, + 448, + 883, + 571 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "3.3 Influence of feedback on rebuttals and decisions", + "text_level": 1, + "bbox": [ + 111, + 588, + 629, + 603 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "We next analyzed the impact of being selected to receive feedback on the rebuttal process and decision outcomes. The rebuttal period took place over three weeks between November 12 and December 4, 2024, and was a time when authors could respond to their reviewer's comments as they revised their papers. We examined how the feedback causally impacted different engagement measures during the rebuttal period.", + "bbox": [ + 109, + 613, + 883, + 674 + ], + "page_idx": 9 + }, + { + "type": "table", + "img_path": "images/af1bd13c6bc85f999f39c1860d84dfd23f079d50200f0c7318b39fdd1a4c968b.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ControlFeedbackNot updatedUpdated
Average length of author rebuttal807855*** (Δ + 6%)840896***
Average length of reviewer replies110116*** (Δ + 5.5%)115129***
", + "bbox": [ + 135, + 685, + 854, + 748 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "Table 1: Average change in rebuttal and reply length (measured as number of words). We observe that being selected to receive feedback causally increased the length of author rebuttals by an average of 48 words $(6\\%;^{**}\\mathrm{p}\\leq 0.001)$ for reviews written by reviewers who were selected to receive feedback, compared to those who were not. We also see that the average length of reviewer replies to author rebuttals is significantly longer among those who were selected to receive feedback, with an average increase of 6 words $(5.5\\%;^{**}\\mathrm{p}\\leq 0.001)$ .", + "bbox": [ + 109, + 758, + 883, + 849 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "In the first row of Table 1, we observed that authors posted rebuttals that were, on average, $6\\%$ longer (48 words) to reviews written by reviewers who were selected to receive feedback, which is significantly longer than those posted to reviews in the control group. In other words, authors were generally more engaged when", + "bbox": [ + 111, + 864, + 883, + 910 + ], + "page_idx": 9 + }, + { + "type": "page_number", + "text": "10", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 9 + }, + { + "type": "text", + "text": "their reviewer was selected to receive feedback. This could be because the feedback led to clearer and more actionable reviews, allowing authors to more effectively address and respond to the reviewer's comments with more detailed rebuttals. In the second row of Table 1, we also saw that reviewers who were selected to receive feedback responded to these rebuttals with replies that were, on average, $5.5\\%$ longer (6 words) than those who were not selected, again highlighting increased engagement among reviewers if they were in the feedback group.", + "bbox": [ + 109, + 90, + 883, + 181 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "This increased engagement is reflected in the percentage of reviewers who edited one or more of their scores for a paper during the rebuttal period. We found that $31.7\\%$ of reviewers who received feedback edited their scores, compared to $30.6\\%$ of those who did not, consistent with receiving feedback being associated with greater reviewer-author engagement. Overall, these findings lead us to conclude that authors were better able to address their reviewers' original concerns during the rebuttal period if their reviewer was selected to receive feedback, leading to more engagement and satisfaction among both groups.", + "bbox": [ + 109, + 181, + 883, + 271 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Finally, we evaluated whether papers with reviews that were selected to receive feedback had a different acceptance rate than those that were not. We compared the acceptance rates of the control and feedback groups, defining the control group as all papers where no reviews were selected to receive feedback and the feedback group as those where at least one review was selected to receive feedback. While there was a slightly higher acceptance rate of $32.3\\%$ among papers in the feedback group, compared to $30.8\\%$ among the control group, this difference was not statistically significant. 
This indicates that while receiving feedback promoted more engaged and thorough discussions among reviewers and authors, it did not substantially change acceptance rates.", + "bbox": [ + 109, + 272, + 883, + 393 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "3.4 Clustering analysis of the feedback comments", + "text_level": 1, + "bbox": [ + 111, + 410, + 607, + 428 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/e9b2135417f8a4a3154218044a289992001ca73745da1559da65cd735992daca.jpg", + "image_caption": [ + "A" + ], + "image_footnote": [], + "bbox": [ + 114, + 489, + 377, + 599 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/7c456f8feec7c678459b5c72845568ab8bcd9659db979be35d064f3fe484cfb5.jpg", + "image_caption": [ + "B" + ], + "image_footnote": [], + "bbox": [ + 372, + 460, + 584, + 616 + ], + "page_idx": 10 + }, + { + "type": "image", + "img_path": "images/4dc4fc56dbe55e0e8da3ad87e006978c1a53d8a96d528471693d75f4bb8980bb.jpg", + "image_caption": [ + "Figure 5: (A) Feedback clusters. We used an LLM to group all the feedback items we provided to reviewers into five distinct clusters based on the text. We found that nearly half of the feedback was directed at asking the reviewer to 'clarify methodological concerns to make their request specific and actionable.' The next most popular cluster was feedback asking the reviewer to 'clarify their request by adding specific analyses, baselines, or references.' (B) Incorporation rate by cluster. We measured the percentage of feedback items within each cluster that reviewers incorporated. Overall, $17.7\\%$ of all feedback was incorporated. When examined by cluster, incorporation rates ranged from $14\\%$ to $18\\%$ , with no statistically significant differences observed." + ], + "image_footnote": [], + "bbox": [ + 606, + 463, + 875, + 616 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "To gain more insights into what types of feedback were provided by the AI agent, we present a quantitative clustering analysis of the feedback generated. Of the 69,836 total feedback items the agent gave to reviewers, we characterized the feedback by generating distinct clusters. Inspired by [27, 28], we generated five clusters on 1000 randomly sampled feedback items using an LLM; we repeated this process twice to ensure robustness. Of the 10 candidate clusters, we prompted a second LLM to select five non-overlapping clusters, giving us the five clusters we present below. Finally, we prompted a third LLM to assign the feedback items to one of the five clusters so we could determine what proportion of feedback belonged to each cluster. We depict the five clusters in Figure 5A, as well as examples of feedback items that would fall into each cluster in Table 2.", + "bbox": [ + 109, + 791, + 883, + 912 + ], + "page_idx": 10 + }, + { + "type": "page_number", + "text": "11", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 10 + }, + { + "type": "table", + "img_path": "images/b87b6b6a84e207829cfe46a705418008d45b8586d798978d7b7854b20f519099.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
ClusterReviewer commentFeedback to the reviewer
Clarify methodological concerns to make request specific and actionableThe experimental setup and tool flow used to conduct the experiments should be described in more detail.To make this feedback more actionable, it would be helpful to specify what aspects of the experimental setup and tool flow you feel are missing. For example, are you looking for more details on the hardware specifications used for CPU/GPU/FPGA experiments, software frameworks and versions, training hyperparameters, or quantization settings?
Clarify request by adding specific analyses, baselines, or referencesAll the experiment examples are numerical and toy, and there is a large mismatch with real physical engineering systems. Therefore, I do not think they are convincing.To make this critique more constructive, you could suggest specific real-world physical systems or benchmarks that would provide a more convincing evaluation of the method. This would give the authors concrete ideas for improving their experimental validation.
Clarify suggestions to visuals or formatA few suggestions, Fig. 2 is very hard to understand. Is there another way to convey the same information which is easier to read?To make this feedback more constructive, try to articulate what specific aspects of Figure 2 are difficult to understand. You could suggest alternative visualization formats or specific elements that need clarification. This would help the authors know how to improve the figure.
Request authors expand on methodological limitations or broader implicationsCSP can only support binary labels.To make this comment more constructive, you could ask the authors to discuss the implications of this limitation for real-world applications and whether they have considered any extensions to support multi-class problems.
Clarify desired improvements to theoretical analysis or proofsHow standard are the assumptions (Assumptions C.1 and C.2) required for proving Theorems 4.1 and 4.2? It would be helpful to see more justification regarding their validity (e.g. other papers building based upon similar assumptions).This is an excellent question that probes the theoretical foundations of the paper. To make it more actionable, you could ask the authors to provide specific examples of other works in the field that rely on similar assumptions, or to explain how these assumptions compare to those typically used in related theoretical analyses. This would help contextualize the theoretical contributions within the broader literature.
", + "bbox": [ + 114, + 87, + 919, + 664 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Table 2: Examples of AI-generated feedback that belong to each of the five main clusters. We also provide the original review comment that triggered the generation of the feedback.", + "bbox": [ + 111, + 679, + 883, + 710 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "These clusters indicate that the vast majority of feedback provided was addressed towards vague review comments and aimed to make them more specific, actionable, and justified. We saw that the agent rarely chose to comment on content misunderstandings, in large part because it had to be absolutely certain there was an error and provide a direct quote from the paper highlighting the mistake as we did not tolerate any hallucinations. Therefore, we saw that the model would err on the side of caution and not provide many comments related to that category.", + "bbox": [ + 109, + 734, + 883, + 825 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "We also sought to measure the percentage of feedback items within each cluster that were incorporated by reviewers, as shown in Figure 5B. Overall, out of the 69,836 feedback items given, we found that $17.7\\%$ of all feedback was incorporated. On a cluster basis, we found that the 'clarify request by adding specific analyses, baselines, or references' and 'clarify desired improvements to theoretical analysis or proofs' clusters had the highest incorporation rate at $18\\%$ . The 'clarify suggestions to visuals or format' cluster had the", + "bbox": [ + 109, + 825, + 883, + 902 + ], + "page_idx": 11 + }, + { + "type": "page_number", + "text": "12", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "lowest incorporation rate at $14\\%$ . Overall, we do not see statistically significant differences in incorporation rates among the clusters, implying that reviewers did not find certain categories of feedback to be more or less useful than others.", + "bbox": [ + 109, + 90, + 885, + 137 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "4 Related Works", + "text_level": 1, + "bbox": [ + 112, + 159, + 331, + 180 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Due to their extensive capabilities, LLMs are being used across every stage of the peer review process. Reviewers increasingly use LLMs to assist in drafting peer reviews [15, 29, 30]. An estimated $17.5\\%$ of authors of Computer Science abstracts on arXiv [31] and $10.6\\%$ of reviewers at ICLR 2024 [16] used LLMs for writing assistance. Other studies have shown the potential of LLMs to make the entire review pipeline more efficient across various stages [32, 33, 34, 35] such as writing manuscripts [36], initial quality control [37, 38, 27], and even providing AI-generated instructions for how to write reviews [39].", + "bbox": [ + 109, + 190, + 883, + 281 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "As peer review workloads continue to increase, LLMs present an opportunity to alleviate some of the burden on human reviewers by providing reviews of submitted manuscripts. In a prospective survey study, 308 researchers from 110 institutions received GPT-4-generated feedback on their papers. Of these, $57.4\\%$ found the feedback helpful, and $82.4\\%$ felt it was more useful than the feedback provided by at least some human reviewers [12]. 
Building off of this work, [40] proposed a multi-agent review generation system that improved the specificity and helpfulness of feedback provided compared to GPT-4, reducing the rate of generic comments from $60\\%$ to $29\\%$ .", + "bbox": [ + 109, + 281, + 885, + 386 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Furthermore, LLMs offer an efficient and possibly less biased alternative to human evaluations; [41] found that human evaluators of peer reviews were highly susceptible to bias from review length and paper score, as there were high levels of subjectivity among reviewers. These findings suggest that integrating LLMs into the review evaluation process could standardize assessments and reduce inconsistencies. As LLM-based tools continue to evolve, they hold the potential to improve both the speed and quality of manuscript evaluations. Our experiment is the first to demonstrate how LLMs can improve the peer review process on a large scale, highlighting their practical benefits.", + "bbox": [ + 109, + 386, + 883, + 492 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "However, despite these advancements, no prior studies had specifically examined how LLMs could be used to provide feedback on peer reviews in the areas we focused on in our experiment. A study released after our ICLR experiment, however, introduced a benchmark to identify toxicity in peer reviews [42]. The authors identified four categories of toxic comments: using emotive or sarcastic language, vague or overly critical feedback, personal attacks, and excessive negativity. These categories align closely with the ones we chose for our agent to provide feedback on. The authors benchmarked several LLMs for detecting toxicity and tested their ability to revise toxic sentences, finding that human evaluators preferred $80\\%$ of these revisions. In future iterations of our Review Feedback Agent, this benchmark could offer a valuable tool for testing our pipeline's ability to detect toxicity and offer constructive feedback.", + "bbox": [ + 109, + 492, + 885, + 630 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "5 Discussion", + "text_level": 1, + "bbox": [ + 112, + 651, + 279, + 672 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Our research demonstrates the significant potential of LLM-based systems to enhance peer review quality at scale. By providing targeted feedback to reviewers at ICLR 2025, we observed meaningful improvements in review specificity, engagement, and actionability. We saw that $27\\%$ of reviewers updated their reviews, and an overwhelming majority of those who made updates incorporated at least one piece of feedback into their modifications. Blinded AI researchers found the updated reviews to be consistently more clear and informative. Furthermore, feedback intervention led to increased engagement throughout the review process, with longer reviews, rebuttals, and reviewer responses, suggesting more involved discussions between authors and reviewers.", + "bbox": [ + 109, + 681, + 883, + 801 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "We designed the AI feedback system to enhance reviews while ensuring human reviewers retain complete control. First, the AI-generated feedback was purely optional, and reviewers could decide whether to incorporate it or not; by default, they could opt out by ignoring the feedback. Second, human reviewers had full control over the final review and the scores visible to the authors. 
To reduce the risk of hallucination, the AI feedback had to pass several rigorous reliability tests before being shared with reviewers. Finally, no personal or identifiable information about reviewers or authors was disclosed to the agent. An IRB review deemed the system to be low risk.", + "bbox": [ + 109, + 803, + 885, + 910 + ], + "page_idx": 12 + }, + { + "type": "page_number", + "text": "13", + "bbox": [ + 488, + 935, + 509, + 948 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Going forward, there are several directions to further improve the Review Feedback Agent. Our feedback categories focused on three main areas (improving specificity, addressing misunderstandings, and ensuring professionalism). While these categories were derived from reviewer guides and previous studies and encompass the majority of author complaints, they may not capture all aspects of review quality. Expanding to other categories would be helpful. Additionally, it would be interesting to explore the use of reasoning models to generate more nuanced feedback for complex issues in reviews. Finally, the concept of developing reliability tests for LLMs is an evolving field, with new studies emerging after our experiment [43, 44], and we hope to incorporate ideas from these recent works to improve the robustness of our framework. Ultimately, we expect that running this agent at future AI conferences across a diverse range of research topics will improve its robustness and effectiveness.", + "bbox": [ + 109, + 90, + 883, + 241 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "CS conferences have long leveraged machine learning to enhance their peer review processes. One early example is the Toronto Paper Matching algorithm, which was used in NIPS 2010 to match papers with reviewers and has since been deployed by over 50 conferences [45]. However, the impact of many of these earlier applications of machine learning has not been rigorously quantified. To address this gap, we were motivated to conduct this randomized controlled study to rigorously evaluate the effects of review feedback before broader deployment. Our findings show that by striving to make reviews more informative for authors, the Review Feedback Agent has the potential to enhance the overall quality of scientific communication. As LLM capabilities continue to advance, we anticipate even more advanced systems that can provide tailored feedback to reviewers, ultimately benefiting the entire scientific community through improved peer review.", + "bbox": [ + 109, + 242, + 883, + 380 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Acknowledgements", + "text_level": 1, + "bbox": [ + 112, + 400, + 336, + 420 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "We would like to thank Celeste Martínez and Carlos Mondragon Chapa at OpenReview for their help in integrating our agent into the OpenReview interface. We would also like to thank Alex Tamkin and Anthropic for helping us increase our rate limits. Finally, we would like to thank members of the Zou group for their support and comments on this work.", + "bbox": [ + 109, + 431, + 883, + 494 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Author Contributions", + "text_level": 1, + "bbox": [ + 112, + 513, + 372, + 534 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "NT, MY, JS, and JZ designed, developed, and deployed the Review Feedback Agent, conducted analyses, and wrote the paper. 
AG, NP, FS, RY, and CV are program chairs of ICLR 2025 and provided guidance on the feedback study and analysis.", + "bbox": [ + 109, + 545, + 883, + 592 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 114, + 613, + 243, + 633 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Bruce Alberts, Brooks Hanson, and Katrina L. Kelner. Editorial: Reviewing peer review. Science, 321(5885):15-15, 2008.", + "[2] Jacalyn Kelly, Tara Sadeghieh, and Khosrow Adeli. Peer review in scientific publications: benefits, critiques, & a survival guide. *Ejifcc*, 25(3):227, 2014.", + "[3] Publons. Global state of peer review 2018, 2018.", + "[4] Ariful Azad and Afeefa Banu. Publication trends in artificial intelligence conferences: The rise of super prolific authors, 2024.", + "[5] Alison McCook. Is peer review broken? submissions are up, reviewers are overtaxed, and authors are lodging complaint after complaint about the process at top-tier journals. what's wrong with peer review?, 2006.", + "[6] ICLR. Iclr 2024 press release, 2024.", + "[7] Anna Rogers and Isabelle Augenstein. What can we do to improve peer review in NLP? In Trevor Cohn, Yulan He, and Yang Liu, editors, Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1256–1262, Online, November 2020. Association for Computational Linguistics." + ], + "bbox": [ + 122, + 643, + 883, + 912 + ], + "page_idx": 13 + }, + { + "type": "page_number", + "text": "14", + "bbox": [ + 488, + 935, + 509, + 946 + ], + "page_idx": 13 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[8] Anna Rogers, Marzena Karpinska, Jordan Boyd-Graber, and Naoaki Okazaki. Program chairs' report on peer review at acl 2023. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages x1-lxxv, 2023.", + "[9] Martijn Arns. Open access is tiring out peer reviewers. Nature, 515:467, 2014.", + "[10] Corinna Cortes and Neil D. Lawrence. Inconsistency in conference peer review: Revisiting the 2014 neurips experiment, 2021.", + "[11] Anthropic. Claude 3.5 sonnet, 2024.", + "[12] Weixin Liang, Yuhui Zhang, Hancheng Cao, Binglu Wang, Daisy Yi Ding, Xinyu Yang, Kailas Vodra-halli, Siyu He, Daniel Scott Smith, Yian Yin, et al. Can large language models provide useful feedback on research papers? a large-scale empirical analysis. NEJM AI, 1(8):AIoa2400196, 2024.", + "[13] Mert Yuksekgonul, Federico Bianchi, Joseph Boen, Sheng Liu, Zhi Huang, Carlos Guestrin, and James Zou. Textgrad: Automatic \"differentiation\" via text, 2024.", + "[14] Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback, 2023.", + "[15] Mohammad Hosseini and Serge P J M Horbach. Fighting reviewer fatigue or amplifying bias? considerations and recommendations for use of chatgpt and other large language models in scholarly peer review. Research Integrity and Peer Review, 2023.", + "[16] Weixin Liang, Zachary Izzo, Yaohui Zhang, Haley Lepp, Hancheng Cao, Xuandong Zhao, Lingjiao Chen, Haotian Ye, Sheng Liu, Zhi Huang, Daniel A. McFarland, and James Y. Zou. 
Monitoring ai-modified content at scale: A case study on the impact of chatgpt on ai conference peer reviews, 2024.", + "[17] Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, et al. Siren's song in the ai ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219, 2023.", + "[18] Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. Instruction-following evaluation for large language models. arXiv preprint arXiv:2311.07911, 2023.", + "[19] ICML 2023 program committee. Icml 2023 reviewer tutorial, 2023.", + "[20] ICML 2022 Program Chairs. How to be a good reviewer? reviewer tutorial for icml 2022, 2022.", + "[21] ACL PC Chairs. Last minute reviewing advice, 2017.", + "[22] Matias Valdenegro. Lxcv @ cvpr 2021 reviewer mentoring program: And how to write good reviews, 2021.", + "[23] Isabelle Augenstein Anna Rogers. Arr reviewer guidelines, 2021.", + "[24] Nyssa J Silbiger and Amber D Stubler. Unprofessional peer reviews disproportionately harm underrepresented groups in stem. PeerJ, 7:e8247, 2019.", + "[25] Mathieu Fenniak, Matthew Stamy, pubpub zz, Martin Thoma, Matthew Peveler, exiledkingcc, and pypdf Contributors. The pypdf library, 2024.", + "[26] Marco Tulio Ribeiro and Scott Lundberg. Testing language models (and prompts) like we test software, 2023." + ], + "bbox": [ + 114, + 89, + 883, + 875 + ], + "page_idx": 14 + }, + { + "type": "page_number", + "text": "15", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 14 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[27] Alexander Goldberg, Ihsan Ullah, Thanh Gia Hieu Khuong, Benedictus Kent Rachmat, Zhen Xu, Isabelle Guyon, and Nihar B. Shah. Usefulness of llms as an author checklist assistant for scientific papers: Neurips'24 experiment, 2024.", + "[28] Alex Tamkin, Miles McCain, Kunal Handa, Esin Durmus, Liane Lovitt, Ankur Rathi, Saffron Huang, Alfred Mountfield, Jerry Hong, Stuart Ritchie, Michael Stern, Brian Clarke, Landon Goldberg, Theodore R. Sumers, Jared Mueller, William McEachen, Wes Mitchell, Shan Carter, Jack Clark, Jared Kaplan, and Deep Ganguli. Clio: Privacy-preserving insights into real-world ai use, 2024.", + "[29] Ryan Liu and Nihar B. Shah. Reviewergpt? an exploratory study on using large language models for paper reviewing, 2023.", + "[30] Som Biswas, Dushyant Dobaria, and Harris L. Cohen. Chatgpt and the future of journal reviews: A feasibility study. The Yale Journal of Biology and Medicine, 96(3):415-420, 2023.", + "[31] Weixin Liang, Yaohui Zhang, Zhengxuan Wu, Haley Lepp, Wenlong Ji, Xuandong Zhao, Hancheng Cao, Sheng Liu, Siyu He, Zhi Huang, Diyi Yang, Christopher Potts, Christopher D Manning, and James Y. Zou. Mapping the increasing use of llms in scientific papers, 2024.", + "[32] Nihar B. Shah. Challenges, experiments, and computational solutions in peer review. Commun. ACM, 65(6):76-87, May 2022.", + "[33] Simon Price and Peter A. Flach. Computational support for academic peer review: a perspective from artificial intelligence. *Commun. ACM*, 60(3):70-79, February 2017.", + "[34] Atreyi Kankanhalli. Peer review in the age of generative ai. Journal of the Association for Information Systems, 25(1), 2024.", + "[35] Ilia Kuznetsov, Osama Mohammed Afzal, Koen Dercksen, Nils Dycke, Alexander Goldberg, Tom Hope, Dirk Hovy, Jonathan K. 
Kummerfeld, Anne Lauscher, Kevin Leyton-Brown, Sheng Lu, Mausam, Margot Mieskes, Aurélie Néveol, Danish Pruthi, Lizhen Qu, Roy Schwartz, Noah A. Smith, Thamar Solorio, Jingyan Wang, Xiaodan Zhu, Anna Rogers, Nihar B. Shah, and Iryna Gurevych. What can natural language processing do for peer review?, 2024.", + "[36] Tiffany I Leung, Taiane de Azevedo Cardoso, Amaryllis Mavragani, and Gunther Eysenbach. Best practices for using ai tools as an author, peer reviewer, or editor. J Med Internet Res, 25:e51584, Aug 2023.", + "[37] Alessandro Checco, Lorenzo Bracciale, Pierpaolo Loreti, Stephen Pinfield, and Giuseppe Bianchi. AI-assisted peer review. Humanities and Social Sciences Communications, 2021.", + "[38] Kayvan Kousha and Mike Thelwall. Artificial intelligence to support publishing and peer review: A summary and review. Learned Publishing, 37(1):4-12, 2024.", + "[39] Xiaotian Su, Thiemo Wambsgangss, Roman Rietsche, Seyed Parsa Neshaei, and Tanja Käser. Reviewwriter: AI-generated instructions for peer review writing. In Ekaterina Kochmar, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Nitin Madnani, Anaïs Tack, Victoria Yaneva, Zheng Yuan, and Torsten Zesch, editors, Proceedings of the 18th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2023), pages 57–71, Toronto, Canada, July 2023. Association for Computational Linguistics.", + "[40] Mike D'Arcy, Tom Hope, Larry Birnbaum, and Doug Downey. Marg: Multi-agent review generation for scientific papers, 2024.", + "[41] Alexander Goldberg, Ivan Stelmakh, Kyunghyun Cho, Alice Oh, Alekh Agarwal, Danielle Belgrave, and Nihar B. Shah. Peer reviews of peer reviews: A randomized controlled trial and other experiments, 2024." + ], + "bbox": [ + 114, + 89, + 883, + 878 + ], + "page_idx": 15 + }, + { + "type": "page_number", + "text": "16", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 15 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "[42] Man Luo, Bradley Peterson, Rafael Gan, Hari Ramalingame, Navya Gangrade, Ariadne Dimarogona, Imon Banerjee, and Phillip Howard. Benchmark on peer review toxic detection: A challenging task with a new dataset, 2025.", + "[43] Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests, 2024.", + "[44] Archiki Prasad, Elias Stengel-Eskin, Justin Chih-Yao Chen, Zaid Khan, and Mohit Bansal. Learning to generate unit tests for automated debugging, 2025.", + "[45] Laurent Charlin, Richard S Zemel, and Craig Boutilier. A framework for optimizing paper matching In UAI, volume 11, pages 86-95, 2011." + ], + "bbox": [ + 112, + 90, + 879, + 262 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Appendices", + "text_level": 1, + "bbox": [ + 114, + 299, + 308, + 327 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "A Agent Prompts", + "text_level": 1, + "bbox": [ + 112, + 344, + 341, + 364 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "We manually fine-tuned the following prompts for the LLMs in the Review Feedback Agent. We provide the prompts below:", + "bbox": [ + 112, + 375, + 880, + 404 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Actor Prompt", + "text_level": 1, + "bbox": [ + 138, + 417, + 243, + 431 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Here is the paper: {paper} . 
Here is the peer review: {review} .", + "bbox": [ + 137, + 443, + 854, + 472 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Actor System Prompt", + "text_level": 1, + "bbox": [ + 138, + 494, + 299, + 510 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "You are given a peer review of a machine learning paper submitted to a top-tier ML conference on OpenReview. Your task is to provide constructive feedback to the reviewer so that it becomes a high-quality review. You will do this by evaluating the review against a checklist and providing specific feedback about where the review fails.", + "bbox": [ + 135, + 521, + 856, + 580 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "Here are step-by-step instructions:", + "bbox": [ + 138, + 583, + 387, + 597 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Read the text of the review and the paper about which the review was written.", + "2. Evaluate every comment in the review:" + ], + "bbox": [ + 158, + 607, + 746, + 646 + ], + "page_idx": 16 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Focus on comments related to weaknesses of the paper or questions the reviewer has. Ignore any comments that are summaries of the paper or that discuss strengths of the paper.", + "- Consider the reviewer's comments in their entirety. Make sure you read all sentences related to one thought, since the full context of the reviewer's comment is very important.", + "- Consider the reviewer's comments in their entirety. Make sure you read all sentences related to one thought, since the full context of the reviewer's comment is very important.", + "- For each comment, evaluate it against the following checklist. Follow the examples for how to respond. Importantly, you should be as helpful as possible. Do no ask superficial questions or make superficial remarks, think deeply and exhibit your understanding.", + "- Most reviewer comments are already sufficiently clear and actionable. Only focus on the ones that clearly fail the checklist items below.", + "- Checklist: \n(a) Check if the reviewer requests something obviously present in the paper. Only respond if certain of the reviewer's error. If so, politely pose a question to the reviewer with" + ], + "bbox": [ + 199, + 657, + 854, + 895 + ], + "page_idx": 16 + }, + { + "type": "page_number", + "text": "17", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 16 + }, + { + "type": "text", + "text": "something like \"Does the following answer your question...?\" quote the relevant paper section verbatim using tags. Use only exact quotes and do not comment if uncertain.", + "bbox": [ + 243, + 99, + 856, + 143 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:", + "bbox": [ + 243, + 143, + 856, + 175 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Example 1:", + "bbox": [ + 254, + 178, + 357, + 191 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: In Figure 4, the efficiency experiments have no results for Transformer models, which is a key limitation of the paper.", + "* Feedback to the reviewer: Does Figure 5 of the paper answer your question? In particular: In Transformers, the proposed technique provides $25\\%$ relative improvement in wall-clock time (Figure 5) ." 
+ ], + "bbox": [ + 272, + 195, + 854, + 273 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Example 2:", + "bbox": [ + 254, + 276, + 357, + 290 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: The authors propose a new deep learning model for predicting protein-protein interactions but don't explain how they address the class imbalance in PPI datasets. Most protein pairs don't interact, creating an imbalance between positive and negative samples. It's unclear how the model balances sensitivity and specificity, which is important for systems biology applications.", + "* Feedback to the reviewer: Does section 3.3 of the paper address your concern? Specifically, the following passage: To address the class imbalance in PPI datasets, where non-interacting pairs are far more common, we employ a \"Balanced Interaction Learning\" (BIL) approach. This involves using a focal loss function to reduce the influence of easy negatives, balanced minibatch sampling to ensure a mix of positive and negative samples, and a two-stage training process with pre-training on a balanced subset before fine-tuning on the full dataset ." + ], + "bbox": [ + 272, + 292, + 854, + 492 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "- Example 3:", + "bbox": [ + 254, + 494, + 357, + 508 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: Lack of theoretical analysis of the communication complexity of the proposed method. In distributed optimization, communication complexity is crucial for minimizing inter-node communication to enhance system efficiency and reduce communication costs.", + "* Feedback to the reviewer: The paper appears to provide a theoretical analysis of communication complexity. Specifically, Theorem 3.6 states an $\\mathrm{O}(\\sqrt{\\kappa_{max}}\\log (1 / \\epsilon))$ communication complexity bound. Does this address your concern? Are there specific aspects of communication complexity analysis you feel are missing?" + ], + "bbox": [ + 272, + 512, + 854, + 651 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "(b) Look for any vague or unjustified claims in the review. This results in points that are not actionable or harder to respond to. For such cases, we would like to nudge the reviewer to provide more specific details and justify their claim.", + "bbox": [ + 215, + 652, + 856, + 698 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "First, let us define what it means for a comment to be actionable and specific enough.", + "bbox": [ + 243, + 699, + 854, + 713 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "There are a few pieces of criteria we will use to determine this:", + "bbox": [ + 243, + 714, + 697, + 728 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "i. The review comment specifies the section, paragraph, figure, or table where the issue occurs.", + "ii. The issue or concern in the review comment is explicitly stated, avoiding vague language.", + "iii. The comment explains why the identified issue is problematic and needs addressing." + ], + "bbox": [ + 246, + 731, + 854, + 827 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "iv. The reviewer provides concrete examples:", + "bbox": [ + 246, + 829, + 573, + 844 + ], + "page_idx": 17 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "A. 
At least one example of what they find unclear or problematic.", + "B. At least one example or suggestion of what would address their concern (e.g., specific metrics, experiments, or changes)." + ], + "bbox": [ + 264, + 847, + 854, + 895 + ], + "page_idx": 17 + }, + { + "type": "page_number", + "text": "18", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 17 + }, + { + "type": "text", + "text": "Do NOT nitpick. Most comments are already specific and actionable, and we do not want to provide feedback on those. We do NOT want to annoy reviewers with unnecessary feedback!", + "bbox": [ + 243, + 99, + 857, + 143 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:", + "bbox": [ + 243, + 145, + 856, + 175 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- Example 1:", + "text_level": 1, + "bbox": [ + 254, + 178, + 357, + 191 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: It appears that the linear mode connectivity results may be somewhat brittle.", + "* Feedback to the reviewer: Can you elaborate on why you see the results as brittle? It may also be helpful to describe in further detail how the authors can address your concern. For example, if you believe additional experiments or theoretical analyses are needed, it may be helpful to explicitly say so." + ], + "bbox": [ + 272, + 195, + 856, + 287 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- Example 2:", + "text_level": 1, + "bbox": [ + 254, + 291, + 357, + 306 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: The paper writing is not fluent enough and needs polishing to be easier to follow.", + "* Feedback to the reviewer: It would be helpful if you could provide specific examples of sections or sentences that are difficult to follow. This would give the authors more actionable feedback." + ], + "bbox": [ + 272, + 309, + 856, + 385 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- Example 3:", + "text_level": 1, + "bbox": [ + 254, + 388, + 357, + 404 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: In the proposed method, an additional optimization problem is required to solve every iteration, i.e., Eq. (11). Thus the proposed method seems inefficient since it is a nested-loop algorithm.", + "* Feedback to the reviewer: Your concern about efficiency is valid, but it may be helpful to describe in further detail how the authors might address your concern. For example, you could ask about the computational complexity of solving Eq. (11) compared to the overall algorithm, or request empirical runtime comparisons to existing methods. This could help the authors address the efficiency concern more concretely." + ], + "bbox": [ + 272, + 407, + 856, + 544 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- Example 4:", + "text_level": 1, + "bbox": [ + 254, + 547, + 357, + 561 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: The paper presents a limited number of baseline methods, and they are relatively outdated (between 2019 and 2021). 
Additionally, the paper lacks analytical experiments to substantiate that the proposed method has learned superior textual structural information.", + "* Feedback to the reviewer: To strengthen this critique, consider suggesting specific, more recent baselines that you believe should be included. Also, providing examples of analytical experiments that could effectively demonstrate superior learning of textual structural information would make this feedback more actionable for the authors." + ], + "bbox": [ + 272, + 565, + 856, + 702 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- Example 5:", + "text_level": 1, + "bbox": [ + 254, + 705, + 357, + 720 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: One of the assumptions of this paper is that \"most GNNs perform better on homophilic graphs\". I personally do not agree with it. A part of the heterophilic graphs are easy to fit, e.g., Wisconsin with $90 + \\%$ accuracy, and some homophilic graphs are challenging. The difficulties of node classification on different datasets are not only related to the graph (label) homophily, but also related to the node features, and many other factors.", + "* Feedback to the reviewer: Your point is helpful, but it would be more actionable to ask the authors to provide evidence supporting their assumption, rather than simply disagreeing. Consider asking for specific examples or citations that demonstrate GNNs performing better on homophilic graphs." + ], + "bbox": [ + 272, + 723, + 854, + 877 + ], + "page_idx": 18 + }, + { + "type": "text", + "text": "- Example 6:", + "text_level": 1, + "bbox": [ + 254, + 880, + 357, + 895 + ], + "page_idx": 18 + }, + { + "type": "page_number", + "text": "19", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 18 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: The numbers in table 1 are not described.", + "* Feedback to the reviewer: It would be helpful to specify what aspects of the numbers in Table 1 need more description. Are you referring to the meaning of the values, their units, or something else? This would help the authors provide a more targeted response." + ], + "bbox": [ + 272, + 99, + 856, + 178 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "The following are examples where the reviewer's comments are already specific and, most importantly, actionable, so you should not give any feedback:", + "bbox": [ + 243, + 180, + 854, + 210 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Reviewer comment: The paper claims occupancy is increased on Page 6 but it was unclear: (i) what definition of occupancy is being used (GPU resources could mean many things and occupancy often just refers to number of warps that can concurrently run versus max number supported by hardware); and (ii) whether any measurement has been made to confirm the claimed improvement (e.g., using NVIDIA Parallel Nsight or similar approaches for collecting performance counters).", + "- Reviewer comment: Second paragraph under \"Semantic similarity\": I felt lots of details were missing here to better understand the quality of phrases, and the feasibility of the proposed approach. The Appendix A do not provide all necessary details. Is this done on the pretraining corpus? 
What trivial constituents were dropped out and why (some examples would help)?", + "- Reviewer comment: Some works like Saycan and RT2 also consider the match of the environment and the agent ability. Key differences between the proposed method and those existing works need to be more carefully discussed.", + "- Reviewer comment: The problem studied, and the techniques used, are closely related to Lipshitz bandits [2], pricing [3] and bilateral trade [1]. Please consider a more thorough comparison with the already known results and techniques there.", + "- Reviewer comment: In Table 3, FlashFFTConv outperforms torch.fft by up to $8.7\\mathrm{x}$ , while the speedup is about $2\\mathrm{x}$ without the domain-specific optimizations. Does it mean the major speedup comes from the domain-specific optimizations instead of the FlashFFTConv algorithm? Could the authors conduct this ablation study (with and without the domain-specific optimizations) in other experiments?", + "- Reviewer comment: Then in Section 4.2, the authors propose to give the actor past actions to help it infer the state at the current step. I don't understand why is this not done by default. In my understanding, DOMDPs are POMDPs and in POMDPs, past actions and observations should always be given to the policy for optimal control. I don't see how this is an innovation." + ], + "bbox": [ + 254, + 213, + 856, + 633 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "If a reviewer asks a question that is already clear, you do not need to give feedback on it or rephrase it. Questions need to be clear and specific, but they do not necessarily need to be actionable as they represent a reviewer's confusion. To be precise, in most cases if a comment ends in '?' you should ONLY give feedback if the question itself is unclear.", + "bbox": [ + 243, + 636, + 856, + 710 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "Here are some examples of reviewer comments that are clear and specific, and therefore do not need feedback:", + "bbox": [ + 243, + 710, + 856, + 739 + ], + "page_idx": 19 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Reviewer comment: 4) In Figure 6, Spearman rank correlation scores for HCMs are reported. As far as I know, Spearman rank correlation calculates the correlation between two variables. How was the correlation computed from multiple runs in this case?", + "- Reviewer comment: While there are detailed information about training procedure, not much is written about the actual inference step. For instance, how many samples for each prototype are required for reliable performance?" + ], + "bbox": [ + 254, + 744, + 856, + 852 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": "(c) If the reviewer claims the paper lacks novelty, ensure they specify why, including references to similar work. 
If they haven't, we would like to nudge the reviewer to", + "bbox": [ + 217, + 854, + 856, + 883 + ], + "page_idx": 19 + }, + { + "type": "page_number", + "text": "20", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 19 + }, + { + "type": "text", + "text": " justify the claim, by prompting them to provide the most relevant references, the relationships, and specifying similarities or differences.", + "bbox": [ + 241, + 99, + 856, + 128 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:", + "bbox": [ + 241, + 128, + 856, + 159 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Example 1:", + "text_level": 1, + "bbox": [ + 254, + 162, + 357, + 176 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: The paper's novelty is limited considering the ICLR standards.", + "* Feedback to the reviewer: It would be really helpful to the authors if you consider discussing the reasons for why the novelty is limited, and specify what ICLR standards are in this context. In particular, it would be very helpful if you give examples of the closest papers, their similarities, and differences with the methods or results in the current paper." + ], + "bbox": [ + 272, + 180, + 856, + 287 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Example 2:", + "text_level": 1, + "bbox": [ + 254, + 291, + 357, + 305 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: The novelty of this work is not clear from the conclusion and experiments now.", + "* Feedback to the reviewer: To make this feedback more actionable, it would be helpful to specify which aspects of novelty are unclear or missing. Are there particular claims or contributions that need more justification? Providing concrete suggestions for how the authors could better highlight the novelty would give them clearer guidance." + ], + "bbox": [ + 272, + 308, + 856, + 416 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "- Example 3:", + "text_level": 1, + "bbox": [ + 254, + 419, + 357, + 434 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: The proposed method is not innovative enough. I'm not an expert in this field, so I'm not sure about it.", + "* Feedback to the reviewer: It would be helpful if you could elaborate on why you think the method may not be innovative enough, even if you're not an expert. Are there specific aspects that seem similar to existing work? If you're uncertain about the novelty, it's best to phrase this as a question or area for clarification rather than a definitive weakness. For example, you could ask the authors to further explain how their approach differs from or improves upon existing methods for training vision-language models for satellite imagery." + ], + "bbox": [ + 272, + 436, + 856, + 575 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The following are examples where the reviewer's discussion of novelty is already detailed and actionable as written, so you should not give any feedback:", + "bbox": [ + 243, + 577, + 854, + 607 + ], + "page_idx": 20 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Reviewer comment: DASHA is a mash-up between MARINA and existing distributed nonconvex optimization methods. 
Other than the fact that three variants of DASHA get rid of the uncompressed synchronization in MARINA, this reviewer could not pinpoint a difference between MARINA and DASHA. As such, the main novelty of this work seems to be in terms of theoretical analysis of MARINA when the uncompressed synchronization step is removed. The authors could have done a better job of clarifying where does this novelty lie in the analysis (e.g., pinpointing the key analytical approaches in the lemma that helped improve the analysis)", + "- Reviewer comment: I'm not sure the paper has sufficient novelty to be published in the top-tier conference since the proposed method only goes one step further from Task Arithmetic [1] and TIES-MERGING [2] by incorporating trainable weights for task vectors. The concept seems thin to support an entire paper, with only one page (page 6) dedicated to the novel part." + ], + "bbox": [ + 254, + 609, + 856, + 808 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "(d) Identify any personal attacks or inappropriate remarks made by the reviewer. This can be about the personality, the knowledge, or the experience of the authors. For example, they call the work \"incompetent\" without justifying why. For this case, we would like to kindly warn the reviewer about their comment and politely suggest they revise their language.", + "bbox": [ + 215, + 811, + 856, + 887 + ], + "page_idx": 20 + }, + { + "type": "page_number", + "text": "21", + "bbox": [ + 488, + 935, + 506, + 946 + ], + "page_idx": 20 + }, + { + "type": "text", + "text": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:", + "bbox": [ + 243, + 99, + 856, + 130 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Example 1:", + "bbox": [ + 254, + 133, + 357, + 147 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: The authors clearly do not live in the real world and do not care about people or downstream effects of their research.", + "* Feedback to the reviewer: We kindly suggest you revise this comment, as it includes remarks about the personalities or intents of the authors." + ], + "bbox": [ + 272, + 148, + 856, + 212 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Example 2:", + "bbox": [ + 254, + 215, + 357, + 229 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: This paper is embarrassing, and you are clearly not fit to be in research.", + "* Feedback to the reviewer: We appreciate your review, but kindly request that you focus your comments on the specific content and methodology of the paper rather than making personal remarks about the authors." + ], + "bbox": [ + 272, + 233, + 856, + 310 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- Example 3:", + "bbox": [ + 254, + 314, + 357, + 328 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "* Reviewer comment: This MC-IS method for estimating the score will NEVER work well in high dimensions due to variance and thus why works such as [1,2,3,4] which are clearly aware of this formulation (as they either state it in their appendices or use it for subsequent calculation) pursue an optimization alternative to estimating the drift.", + "* Feedback to the reviewer: Consider revising this comment to avoid absolute statements like \"NEVER\". 
Instead, you could phrase it as a concern about scalability to high dimensions, and ask the authors to address this limitation or provide evidence that it can work in higher dimensions." + ], + "bbox": [ + 272, + 330, + 856, + 468 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "3. Provide feedback:", + "bbox": [ + 158, + 479, + 308, + 494 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "- For each comment that fails according to the checklist, write concise feedback in the following format:", + "bbox": [ + 197, + 505, + 856, + 534 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Comment: the verbatim comment of interest", + "- Feedback: your concise feedback" + ], + "bbox": [ + 225, + 540, + 568, + 571 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- If you do not identify any issues with a comment, do not include it in your feedback list.", + "- If you find no issues in the review at all, respond with: 'Thanks for your hard work!'" + ], + "bbox": [ + 197, + 577, + 849, + 613 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Remember:", + "text_level": 1, + "bbox": [ + 138, + 623, + 223, + 636 + ], + "page_idx": 21 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Be concise, limiting your feedback for each comment to 1-2 sentences.", + "- Do not summarize your feedback at the end or include a preamble at the beginning.", + "- Do not repeat anything the reviewer already included in their review, and do not praise anything the reviewer wrote as we want to provide constructive feedback.", + "- Your feedback will be sent to reviewers. Do not mention that you are using a checklist or guidelines.", + "- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer.", + "- Do not provide feedback to any comments that mention a score or rating. You do not care about the reviewer's score or rating for this paper.", + "- Do not provide feedback to any comments that discuss typos." + ], + "bbox": [ + 161, + 648, + 856, + 875 + ], + "page_idx": 21 + }, + { + "type": "page_number", + "text": "22", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 21 + }, + { + "type": "text", + "text": "Aggregator Prompt", + "text_level": 1, + "bbox": [ + 140, + 93, + 282, + 107 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here is the paper: {paper} .", + "bbox": [ + 138, + 119, + 473, + 135 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here are the lists of feedback: {feedbacks} .", + "bbox": [ + 138, + 136, + 728, + 150 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here is the peer review: {review} .", + "bbox": [ + 138, + 151, + 545, + 166 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Aggregator System Prompt", + "text_level": 1, + "bbox": [ + 140, + 188, + 338, + 203 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "You will be given multiple lists of feedback about a peer review of a machine learning paper submitted to a top-tier ML conference. The aim of the feedback is to guide a reviewer to make the review high-quality. 
Your task is to aggregate the lists of feedback into one list.", + "bbox": [ + 138, + 215, + 857, + 260 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here are the guidelines that were followed to generate the feedback lists originally: {ACTOR_SYSTEM_CHART} ", + "bbox": [ + 138, + 275, + 856, + 305 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here are step-by-step instructions:", + "bbox": [ + 138, + 306, + 387, + 320 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Read the multiple feedback lists provided for that review, the text of the review, and the paper about which the review was written.", + "2. For all feedback lists, aggregate them into one list with the best comment-feedback pairs from each list:" + ], + "bbox": [ + 158, + 330, + 854, + 398 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For each comment-feedback pair in the multiple lists that are similar, determine which provides the best feedback and keep only that pair.", + "- If there are unique comment-feedback pairs in the multiple lists, critically determine if it is an essential piece of feedback needed to improve the review. If it is unnecessary or redundant, remove the comment-feedback pair.", + "- You should end up with one feedback list that has no repeated comments from the review and that is high quality.", + "- Return the feedback list in the format you received it in, where the pairs are formatted as:" + ], + "bbox": [ + 199, + 411, + 856, + 547 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Comment: {{the verbatim comment of interest}}", + "- Feedback: {{your concise feedback}}" + ], + "bbox": [ + 227, + 551, + 612, + 585 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Critic Prompt", + "text_level": 1, + "bbox": [ + 140, + 607, + 243, + 621 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here is the paper: {paper} .", + "bbox": [ + 138, + 633, + 473, + 648 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here is the feedback: {feedback} .", + "bbox": [ + 138, + 650, + 576, + 665 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Here is the peer review: {review} .", + "bbox": [ + 138, + 666, + 545, + 680 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Remember:", + "bbox": [ + 138, + 695, + 222, + 708 + ], + "page_idx": 22 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- You are a critic that will help reviewers improve their comments and reviews. Your valuable feedback will help improve their review.", + "- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer." + ], + "bbox": [ + 163, + 720, + 854, + 789 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "Critic System Prompt", + "text_level": 1, + "bbox": [ + 140, + 810, + 300, + 825 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "You are a critic that will help reviewers improve their reviews. You are given a list of feedback to the reviewer comments of a machine learning paper submitted to a top-tier ML conference on OpenReview. 
The aim of the feedback is to guide a reviewer to improve their comments and re", + "bbox": [ + 138, + 835, + 857, + 883 + ], + "page_idx": 22 + }, + { + "type": "page_number", + "text": "23", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 22 + }, + { + "type": "text", + "text": "view as a whole. Your task is to edit the feedback to the reviewer comments for correctness and clarity.", + "bbox": [ + 138, + 99, + 854, + 114 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Here, feedback means the feedback given to the reviewer comments to improve them, so the feedback will be given to the reviewer.", + "bbox": [ + 137, + 128, + 856, + 159 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Here are the guidelines that were followed to generate the feedback to the reviewer comments originally: {ACTOR_SYSTEM_PROMPT} . You should keep in mind to adhere to the above guidelines.", + "bbox": [ + 137, + 175, + 856, + 220 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Here are step-by-step instructions:", + "bbox": [ + 138, + 236, + 387, + 251 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Read the feedback list provided for reviewer comments, the full text of the review itself, and the paper about which the review was written.", + "2. Evaluate every piece of feedback in the feedback list:" + ], + "bbox": [ + 156, + 260, + 854, + 315 + ], + "page_idx": 23 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- For each feedback item, it is imperative that you evaluate the correctness of the feedback. If there is a quote in the feedback, ensure that the quote appears verbatim in the paper. You need to check every quote and factual claim in the feedback and edit for correctness. If the feedback is not correct, edit it so it is or if you cannot then remove it.", + "- For each feedback item, evaluate if it is clear. You should make sure it would not confuse or frustrate the reviewer who reads it." + ], + "bbox": [ + 197, + 325, + 854, + 419 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "3. Remove comment-feedback pairs that are too nitpicky, unnecessary, or superficial. Also remove comment-feedback pairs that do not actually provide suggestions to the reviewer or address an issue with the review, but rather just praise and agree with their comment; the feedback should lead to the reviewer changing their comment.", + "bbox": [ + 156, + 430, + 854, + 489 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Here are some examples of comment-feedback pairs that should be entirely removed from the final feedback list:", + "bbox": [ + 176, + 494, + 854, + 523 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "(a) Reviewer comment: The novelty remains concerned. It seems that the algorithm combines MLR + a bisimulation loss.", + "bbox": [ + 184, + 535, + 854, + 564 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Feedback to the reviewer: It would be helpful if you could elaborate on why you see the novelty as limited. In particular, it would be very helpful if you could discuss the key differences between simply combining MLR and bisimulation loss versus the approach proposed in this paper. 
This would give the authors a better chance to clarify the novelty of their work.", + "bbox": [ + 215, + 568, + 854, + 643 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "(b) Reviewer comment: The paper lacks a proper related work section, which makes it challenging for readers to quickly grasp the background and understand the previous works. It is crucial to include a comprehensive discussion on related works, especially regarding the variance-reduced ZO hard-thresholding algorithm and the variance reduction aspect.", + "bbox": [ + 184, + 648, + 854, + 723 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Feedback to the reviewer: To make this comment more actionable, consider suggesting specific areas of related work that should be covered, such as key papers on ZO optimization, hard-thresholding methods, and variance reduction techniques in optimization. This will give the authors clearer guidance on what to include.", + "bbox": [ + 215, + 726, + 854, + 786 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "(c) Reviewer comment: The paper is not very well-presented and is hard to follow. First of all, it is unclear in the hybrid setting considered, what are the relative relations of the guest parties? In the introduction, it appears that they share the same feature space but have different sample IDs, however, in 3.1 they appear to have different dimensions and unclear alignment. It is suggested that the paper properly define the problem setting. A figure on how data is partitioned by different parties would also help.", + "bbox": [ + 186, + 791, + 854, + 896 + ], + "page_idx": 23 + }, + { + "type": "page_number", + "text": "24", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 23 + }, + { + "type": "text", + "text": "Feedback to the reviewer: Your suggestion for a clearer definition of the problem setting and a visual representation of data partitioning is excellent. This would significantly improve the paper's clarity and readability.", + "bbox": [ + 215, + 99, + 856, + 145 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "(d) Reviewer comment: 3) the model performance of the proposed methods still appear to be a little inferior to the centralized setting, not exactly \"comparable\" as claimed. It is important to understand whether the proposed method is \"lossless\" or \"lossy\" and why. I think more detailed examinations and explanations are needed here.", + "bbox": [ + 186, + 148, + 856, + 210 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Feedback to the reviewer: Your observation about the performance gap between the proposed method and the centralized setting is insightful. Requesting a more detailed analysis of whether the method is lossless or lossy, along with explanations for any performance differences, would significantly enhance the paper's contribution.", + "bbox": [ + 215, + 213, + 856, + 273 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "(e) Reviewer comment: Q2: It appears that the introduced projection loss can be directly optimized with respect to the trigger $T$ . What's the rationale behind setting an upper bound and optimizing the projection loss through this bound? Does this approach offer computational benefits?", + "bbox": [ + 187, + 277, + 856, + 338 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Feedback to the reviewer: This question effectively probes the authors' methodological choices. 
It's a clear and concise query that could lead to valuable insights about the paper's approach. The authors' response could provide important context about the trade-offs involved in their method.", + "bbox": [ + 215, + 340, + 856, + 401 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "4. Edit comments based on evaluations:", + "bbox": [ + 158, + 411, + 447, + 426 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Do not add any new points unless the previous feedback obviously missed something important.", + "- If you do not identify any issues with a comment-feedback pair, do not edit it." + ], + "bbox": [ + 197, + 436, + 854, + 488 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "5. The feedback will be shared with the reviewers for them to improve their comments. Address the reviewer in the second person (e.g., \"you\") and do not refer to them as \"the reviewer.\"", + "6. Return the feedback list in the format you received it in, where the pairs are formatted as:" + ], + "bbox": [ + 156, + 496, + 856, + 553 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Comment: {{the verbatim comment of interest}}", + "- Feedback: {{your concise feedback}}" + ], + "bbox": [ + 199, + 561, + 581, + 598 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Remember:", + "text_level": 1, + "bbox": [ + 138, + 608, + 236, + 622 + ], + "page_idx": 24 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- You are a critic that will help reviewers improve their comments and reviews.", + "- Be concise, limiting your feedback for each reviewer comment to 1-2 sentences.", + "- Do not summarize your feedback at the end or include a preamble at the beginning.", + "- Do not repeat anything the reviewer already included in their review.", + "- Do not mention that you are using a checklist or guidelines.", + "- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer." + ], + "bbox": [ + 161, + 633, + 856, + 787 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Formatter Prompt", + "text_level": 1, + "bbox": [ + 138, + 808, + 274, + 823 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Here is the feedback for you to format: {feedback}", + "bbox": [ + 138, + 835, + 511, + 852 + ], + "page_idx": 24 + }, + { + "type": "page_number", + "text": "25", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 24 + }, + { + "type": "text", + "text": "Formatter System Prompt", + "text_level": 1, + "bbox": [ + 138, + 93, + 331, + 107 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "You will be given a set of feedback given to various reviewer comments in a peer review of a machine learning paper. Your response, which will be the list of reviewer comments and feedback to them, will be shared with the reviewers who wrote the review, so that they can improve their reviews and the peer review cycle.", + "bbox": [ + 135, + 119, + 857, + 180 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Your task is to format the feedback into a structured format. 
You should format the feedback as a list of comment-feedback pairs:", + "bbox": [ + 135, + 195, + 857, + 226 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Reviewer comment: $\\{\\{\\mathrm{a~comment}\\} \\}$", + "- Feedback to the reviewer: {{feedback to the comment}}", + "- Reviewer comment: {{another comment}}", + "- Feedback to the reviewer: {{feedback to the comment}}" + ], + "bbox": [ + 161, + 238, + 612, + 329 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "中", + "bbox": [ + 179, + 338, + 197, + 347 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "Your goal is to only keep feedback to the reviewers that can help them improve their comments. You should only pay attention to lines that start with \"Comment\" or \"Feedback\".", + "bbox": [ + 135, + 361, + 857, + 391 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Only keep the comment-feedback pairs where the feedback can help improve the reviewer. If there is no suggestion for improvement, remove the entire comment-feedback pair.", + "bbox": [ + 161, + 402, + 857, + 434 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- Here is an example of a comment-feedback pair that should be removed from the final feedback list:", + "bbox": [ + 194, + 444, + 856, + 473 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "* Reviewer comment: Section 2.2. \"It independently formulates new approaches\" → Is it a hallucination or a feature? It looks like a hallucination to me. If this is important for achieving good performance, can you provide an ablation study based on whether to allow new approaches or not?", + "bbox": [ + 228, + 479, + 856, + 540 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "* Feedback to the reviewer: This is a thoughtful question about an important aspect of the methodology. Your suggestion for an ablation study is particularly valuable and could provide insights into the method's effectiveness.", + "bbox": [ + 228, + 542, + 856, + 588 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "- If the feedback says \"No changes needed\" or something with a similar meaning, remove the entire comment-feedback pair.", + "bbox": [ + 194, + 592, + 856, + 623 + ], + "page_idx": 25 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- Do not modify the content of the feedback at all, only format it into the bullet point format described above.", + "- The response you send will be immediately shared with the reviewers. Thus, there should be NO OTHER TEXT in the output, for example no preamble or conclusion sentences. Only respond with the list of feedback & reviewer comment bullets, and no other text.", + "- Since your response will immediately be sent to the reviewers, if there is no feedback, just say \"Thanks for your hard work!\"." + ], + "bbox": [ + 161, + 632, + 856, + 758 + ], + "page_idx": 25 + }, + { + "type": "page_number", + "text": "26", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 25 + }, + { + "type": "text", + "text": "We also provide the prompt used for the incorporation analysis:", + "bbox": [ + 137, + 90, + 599, + 107 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Incorporation Analysis Prompt", + "text_level": 1, + "bbox": [ + 138, + 116, + 366, + 132 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Task: Determine if the following feedback suggestion was incorporated into the modified version of a review. 
Also, categorize the given feedback into exactly one of these three categories:", + "bbox": [ + 137, + 143, + 859, + 175 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. ACTIONABLE_VAGUE: Encouraging reviewers to rephrase vague review comments, making them more actionable for the authors. For example, the feedback says: \"It would be helpful to suggest specific baselines that you think must be included. Are there particular methods you feel are missing from the current comparison? Could you elaborate why?\"", + "2. CONTENTClarIFY: Highlighting sections of the paper that may already address some of the reviewer's questions (clarifying content). For example, the feedback says: \"Does Figure 5 of the paper answer your question? In particular: 'In Transformers, the proposed technique provides $25\\%$ relative improvement in wall-clock time (Figure 5)'.\"", + "3. ADDRESS_UNPROFESSIONAL: Identifying and addressing unprofessional or inappropriate remarks in the review. For example, the feedback says: \"We appreciate your review, but kindly request that you focus your comments on the specific content and methodology of the paper rather than making personal remarks about the authors.\"" + ], + "bbox": [ + 156, + 184, + 856, + 385 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Instructions:", + "text_level": 1, + "bbox": [ + 138, + 396, + 246, + 410 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "1. Read the original review and modified review.", + "2. Read the reviewer's original comment and the feedback given to the reviewer.", + "3. Determine if the changes suggested in the feedback were incorporated into the modified review as compared to the original review. If the reviewer's original comment appears verbatim in the modified review still, you should return FALSE for the incorporation. The incorporations should be clear and quite explicit. Think critically about if the incorporation is significant enough to count.", + "4. Determine which of the three categories best describes the primary purpose of the feedback.", + "5. Think step by step and explain your reasoning." + ], + "bbox": [ + 158, + 421, + 856, + 597 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Output Format: Please provide your final answer as two comma-separated values between tags, where:", + "bbox": [ + 137, + 607, + 856, + 637 + ], + "page_idx": 26 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The first boolean is TRUE or FALSE depending on whether the feedback was incorporated.", + "- The second string is one of these three options: ACTIONABLE_VAGUE, CONTENT_CLRIFY, or ADDRESS_UNPROFESSIONAL." + ], + "bbox": [ + 161, + 647, + 856, + 702 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Example: TRUE, ACTIONABLE_VAGUE", + "bbox": [ + 138, + 712, + 558, + 728 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "B Reliability tests", + "text_level": 1, + "bbox": [ + 112, + 760, + 346, + 780 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "We generated the following reliability tests to be run in real-time after feedback was generated. For each reliability test, we provide examples of feedback that would fail it:", + "bbox": [ + 111, + 791, + 883, + 821 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "1. 
Praising the reviewer: make sure the feedback does not simply praise what the reviewer wrote without providing critical suggestions to improve their comment.", + "bbox": [ + 132, + 828, + 879, + 857 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "Example feedback: \"This is a good question that challenges a key assumption of the paper.\"", + "bbox": [ + 151, + 858, + 816, + 875 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "2. Addressing feedback to the author: certify that the feedback is addressed to the reviewer with suggestions to make their review better, rather than addressed to the author of the paper with suggestions", + "bbox": [ + 130, + 881, + 883, + 912 + ], + "page_idx": 26 + }, + { + "type": "page_number", + "text": "27", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 26 + }, + { + "type": "text", + "text": "on how they can improve their paper.", + "bbox": [ + 151, + 90, + 426, + 104 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Example feedback: \"To strengthen your paper, consider discussing the relationship between FrugalGPT and traditional ensembling techniques. Highlight both similarities and differences and explain how this relates to the observed quality improvements. This would provide more context for your results and situate your work within the broader field of machine learning.\"", + "bbox": [ + 151, + 106, + 883, + 167 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "3. Restate what the reviewer wrote: does the feedback simply restate what the review comment says without providing any new meaningful and unique suggestions?", + "bbox": [ + 130, + 176, + 883, + 205 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Example reviewer comment: Can examples or further clarification be given for the 3.1 sentence \"enhancing the accountability of the output\"? This isn't clear, at least to me.", + "bbox": [ + 150, + 207, + 880, + 236 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "Example feedback: This is a good point that could lead to improved clarity in the paper. To make your comment more actionable, you could ask the authors to provide examples or further clarification for the sentence \"enhancing the accountability of the output\".", + "bbox": [ + 150, + 237, + 883, + 282 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "4. Format is correct: ensure that all feedback pairs are in the correct format, protecting against any errors in the pipeline that could have led to malformed feedback.", + "bbox": [ + 130, + 292, + 883, + 321 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "C Average score changes during review and rebuttal periods", + "text_level": 1, + "bbox": [ + 111, + 345, + 834, + 364 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "In Figure S1A, we examined the potential change in review scores (soundness, presentation, contribution, rating, and confidence) between the initial and modified reviews across the groups during the review period. We found that reviewers who were selected to receive feedback did not change their scores more than those in the control group (top panel). We also saw that of reviewers who received feedback, reviewers who updated their review were significantly more likely to decrease their soundness score and increase their confidence score at the end of the review period (before the rebuttal period began) compared to those who did not update their review. 
This suggests that reviewers who updated their reviews became more confident in their assessments.", + "bbox": [ + 109, + 376, + 883, + 494 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "In Figure S1B, we conducted the same analysis during the rebuttal period. Similar to the review period, we found that reviewers who were selected to receive feedback did not change their scores more than those in the control group (top panel). Of reviewers who received feedback, those who updated their reviews significantly increased all scores except confidence compared to those who did not update their reviews. From this, we see that reviewers who updated their reviews were much more engaged in the rebuttal process.", + "bbox": [ + 109, + 497, + 883, + 574 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "D Incorporation model validation", + "text_level": 1, + "bbox": [ + 111, + 595, + 522, + 616 + ], + "page_idx": 27 + }, + { + "type": "text", + "text": "To test our incorporation model, we hand-labeled a test set of 222 feedback items (from 63 randomly chosen reviews that had been updated) as being incorporated into the updated review or not. We labeled 132 of those items as incorporated (59.5%) and 90 as not (40.5%). We then ran those 222 feedback items through the LLM pipeline and received a 92% accuracy rate, with a false negative rate of 0.9% and a false positive rate of 5.9% (see Supplementary Figure S2). Of the false positives, 8/13 were instances of human error where the labeler missed that the item was incorporated into the review, and the model accurately identified this incorporation. The remaining 5 false positives were due to subjectivity - the model reasoned that the reviewer partially incorporated the sentiments of the feedback, whereas the labeler did not view that as sufficient enough to count as incorporated. The two false negatives represent data points the labeler initially mislabeled and the model correctly labeled. This effectively gives us a false negative rate of 0% and a false positive rate of 2.25%, allowing us to be confident that our incorporation pipeline was highly accurate.", + "bbox": [ + 109, + 626, + 883, + 794 + ], + "page_idx": 27 + }, + { + "type": "page_number", + "text": "28", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 27 + }, + { + "type": "image", + "img_path": "images/9dee71417f024a75311dcca739b498277ce5db9e87bd392a85b26aa9f7e2dc82.jpg", + "image_caption": [ + "A", + "Supplementary Figure S1: (A) Review period score changes. (Top) There is no significant difference in updating scores (measured between initial and pre-rebuttal reviews) between the feedback and control groups. (Bottom) Among reviewers who received feedback, those who updated their reviews were more likely to decrease soundness scores $(\\mathrm{p} \\leq 0.05)$ and increase confidence scores $(\\mathrm{p} \\leq 0.05)$ compared to those who did not update their reviews. (B) Rebuttal period score changes. (Top) There is no significant difference in updating scores (measured between pre- and post-rebuttal reviews) between the feedback and control groups. (Bottom) Among feedback recipients, reviewers who updated their reviews demonstrated significantly larger score increases across all metrics (soundness: $^{**}\\mathrm{p} \\leq 0.01$ ; presentation: $^{***}\\mathrm{p} \\leq 0.001$ ; contribution: $^{\\ast}\\mathrm{p} \\leq 0.05$ ; rating: $^{***}\\mathrm{p} \\leq 0.001$ ) except confidence, compared to non-updaters." 
+ ], + "image_footnote": [], + "bbox": [ + 127, + 223, + 485, + 613 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/5d0e819082fc1eb3e1244c31df2483dcf42bb1a4e7d886a57d2f157e222f01be.jpg", + "image_caption": [ + "B" + ], + "image_footnote": [], + "bbox": [ + 522, + 223, + 875, + 613 + ], + "page_idx": 28 + }, + { + "type": "page_number", + "text": "29", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 28 + }, + { + "type": "image", + "img_path": "images/c919e5fe32eb32c2e054bbd43c926c9021b22fba46f76a309bc76e44d87bdac0.jpg", + "image_caption": [ + "A" + ], + "image_footnote": [], + "bbox": [ + 205, + 280, + 230, + 303 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Original review", + "bbox": [ + 233, + 282, + 352, + 300 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/1e9dd95be4cbe45e120ff063aafa70f494240cc5380b6e024acba14c412c4f76.jpg", + "image_caption": [ + "B" + ], + "image_footnote": [], + "bbox": [ + 205, + 323, + 230, + 345 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Modified review", + "bbox": [ + 230, + 325, + 359, + 340 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/b6314c2a0c17b97ed691d22b6db7c48ab77ca5a01e5acc6e666823c22a6a99e5.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 205, + 364, + 230, + 388 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Feedback item", + "bbox": [ + 232, + 368, + 352, + 383 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/d1d3ea6c5c822412e605337fe0c301513ef3ce178abed41a323ab4f830aec80e.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 421, + 316, + 589, + 352 + ], + "page_idx": 29 + }, + { + "type": "text", + "text": "Feedback incorporated (Y/N)", + "bbox": [ + 604, + 316, + 795, + 352 + ], + "page_idx": 29 + }, + { + "type": "image", + "img_path": "images/db0d37deb2bcaec229f5f0d8eebc6e9fa349e0043920244e80891c021de2bd95.jpg", + "image_caption": [ + "Supplementary Figure S2: (A) Incorporation model pipeline. Given the original review text, modified review text, and individual feedback item, the LLM determined if the feedback was incorporated into the modified review or not. (B) Model accuracy. Our incorporation model successfully labeled $92\\%$ of the test feedback items, where human annotators determined the ground truth labeling. Of the false positives, the majority were instances of human error where the model accurately identified the missed incorporation. All of the false negatives were instances of human error that the model caught." + ], + "image_footnote": [], + "bbox": [ + 344, + 412, + 643, + 601 + ], + "page_idx": 29 + }, + { + "type": "page_number", + "text": "30", + "bbox": [ + 488, + 935, + 508, + 946 + ], + "page_idx": 29 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_model.json b/data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_model.json new file mode 100644 index 0000000000000000000000000000000000000000..65cf6b27edb28b842e903df03b3e253d50d7b81a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_model.json @@ -0,0 +1,5441 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.271, + 0.061, + 0.707 + ], + "angle": 270, + "content": "arXiv:2504.09737v1 [cs.AI] 13 Apr 2025" + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.136, + 0.885, + 0.188 + ], + "angle": 0, + "content": "Can LLM feedback enhance review quality? 
A randomized study of 20K reviews at ICLR 2025" + }, + { + "type": "text", + "bbox": [ + 0.212, + 0.205, + 0.784, + 0.242 + ], + "angle": 0, + "content": "Nitya Thakkar1, Mert Yuksekgonul1, Jake Silberg1, Animesh Garg2, Nanyun Peng3, Fei Sha4, Rose Yu5, Carl Vondrick6, James Zou1" + }, + { + "type": "text", + "bbox": [ + 0.425, + 0.243, + 0.574, + 0.259 + ], + "angle": 0, + "content": "\\(^{1}\\)Stanford University" + }, + { + "type": "text", + "bbox": [ + 0.381, + 0.261, + 0.617, + 0.277 + ], + "angle": 0, + "content": "2Georgia Institute of Technology" + }, + { + "type": "text", + "bbox": [ + 0.364, + 0.278, + 0.635, + 0.294 + ], + "angle": 0, + "content": "3University of California, Los Angeles" + }, + { + "type": "text", + "bbox": [ + 0.435, + 0.296, + 0.563, + 0.311 + ], + "angle": 0, + "content": "4Google Research" + }, + { + "type": "text", + "bbox": [ + 0.369, + 0.313, + 0.629, + 0.329 + ], + "angle": 0, + "content": "5University of California, San Diego" + }, + { + "type": "text", + "bbox": [ + 0.421, + 0.331, + 0.578, + 0.347 + ], + "angle": 0, + "content": "\\(^{6}\\)Columbia University" + }, + { + "type": "title", + "bbox": [ + 0.463, + 0.378, + 0.534, + 0.392 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.397, + 0.846, + 0.648 + ], + "angle": 0, + "content": "Peer review at AI conferences is stressed by rapidly rising submission volumes, leading to deteriorating review quality and increased author dissatisfaction. To address these issues, we developed Review Feedback Agent, a system leveraging multiple large language models (LLMs) to improve review clarity and actionability by providing automated feedback on vague comments, content misunderstandings, and unprofessional remarks to reviewers. Implemented at ICLR 2025 as a large randomized control study, our system provided optional feedback to more than 20,000 randomly selected reviews. To ensure high-quality feedback for reviewers at this scale, we also developed a suite of automated reliability tests powered by LLMs that acted as guardrails to ensure feedback quality, with feedback only being sent to reviewers if it passed all the tests. The results show that \\(27\\%\\) of reviewers who received feedback updated their reviews, and over 12,000 feedback suggestions from the agent were incorporated by those reviewers. This suggests that many reviewers found the AI-generated feedback sufficiently helpful to merit updating their reviews. Incorporating AI feedback led to significantly longer reviews (an average increase of 80 words among those who updated after receiving feedback) and more informative reviews, as evaluated by blinded researchers. Moreover, reviewers who were selected to receive AI feedback were also more engaged during paper rebuttals, as seen in longer author-reviewer discussions. This work demonstrates that carefully designed LLM-generated review feedback can enhance peer review quality by making reviews more specific and actionable while increasing engagement between reviewers and authors. The Review Feedback Agent is publicly available at https://github.com/zou-group/review_feedback_agent." 
+ }, + { + "type": "title", + "bbox": [ + 0.114, + 0.67, + 0.304, + 0.687 + ], + "angle": 0, + "content": "1 Introduction" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.701, + 0.884, + 0.881 + ], + "angle": 0, + "content": "Scientific peer review is a critical step before publication, where domain experts evaluate the research to ensure thoroughness and scientific integrity, prevent false claims, and provide a strong foundation for future work [1, 2]. High-quality reviews are essential for authors to improve their work, address key limitations, and advance scientific progress. However, in a survey of 11,800 researchers worldwide, while \\(98\\%\\) view peer review as essential to maintaining the quality and integrity of academic communication, only \\(55.4\\%\\) expressed satisfaction with the quality of reviews they receive [3]. This dissatisfaction has grown as obtaining constructive and high-quality peer reviews has become more challenging due to the increase in the number of paper submissions, especially in fast-moving areas like Artificial Intelligence (AI) [4, 5]. For example, the International Conference on Learning Representations (ICLR) experienced year-over-year submission increases of \\(47\\%\\) in 2024 and \\(61\\%\\) in 2025 [6]. To maintain a rigorous and meaningful peer review process amid this growth, it is crucial to address the growing burden on reviewers and the subsequent deterioration in review quality." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.882, + 0.884, + 0.914 + ], + "angle": 0, + "content": "Authors at AI conferences increasingly report receiving short, vague reviews with criticisms like 'not novel' or 'not state-of-the-art (SOTA)' [7]. At the 2023 Association for Computational Linguistics meeting," + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "1" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.884, + 0.211 + ], + "angle": 0, + "content": "authors flagged \\(12.9\\%\\) of reviews for poor quality, primarily due to these vague, surface-level criticisms [8]. The peer review system is further strained by reviewers being assigned papers outside their expertise [9] and the same papers being reviewed multiple times due to high rejection rates [1]. Additionally, the 2014 NeurIPS Experiment highlighted inconsistencies in the peer review process by showing that approximately \\(25\\%\\) of paper acceptance decisions differed between two independent review committees [10]. These issues not only frustrate authors but potentially allow weaker research to be accepted while strong work is rejected, ultimately preventing papers from reaching their full potential due to the decline of meaningful dialogue between reviewers and authors." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.212, + 0.884, + 0.288 + ], + "angle": 0, + "content": "Large language models (LLMs) [11] have the potential to enhance the quality and usefulness of peer reviews for authors [12]. Recent studies demonstrated that LLMs can serve as effective critics, generating detailed and constructive feedback [13, 14]. Furthermore, LLMs have already shown high utilization in the peer review process. Reviewers are increasingly turning to LLMs to assist in drafting their reviews, with an estimated \\(10.6\\%\\) of reviewers at ICLR 2024 using LLMs for this purpose [15, 16]." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.288, + 0.885, + 0.438 + ], + "angle": 0, + "content": "To explore how LLMs can improve review quality at scale, we introduce Review Feedback Agent, a multi-LLM system designed to enhance the clarity and actionability of reviews by providing feedback to reviewers. Piloted at ICLR 2025 as a large randomized control study, our agent provided feedback to over 20,000 randomly selected reviews (representing half of all ICLR 2025 reviews) over four weeks from October 15 to November 12, 2024. The generated feedback primarily focused on minimizing instances of vague and unjustified comments while also addressing content misinterpretations and unprofessional remarks. Using Claude Sonnet 3.5 as the backbone [11], we created a system of five LLMs that collaborated to generate high-quality feedback. To enhance the system's reliability against potential errors or failures in instruction-following [17, 18], we developed a set of reliability tests to evaluate specific qualities of the generated feedback; the feedback was only posted if it passed all of these tests." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.439, + 0.884, + 0.559 + ], + "angle": 0, + "content": "Summary of main findings. Of the randomly selected ICLR reviews that received AI feedback, \\(26.6\\%\\) of reviewers updated their reviews, altogether incorporating 12,222 suggestions from the feedback agent into the reviews. Blinded ML researchers labeled these revised reviews as more informative and clearer than their initial versions. Reviewers who updated after receiving feedback increased the length of reviews by an average of 80 words. Furthermore, AI feedback led to more engaged discussions during the rebuttal period, as seen through longer author and reviewer responses. We also observed that reviewers who received feedback were more likely to change their scores after the rebuttal period, which was consistent with a more engaged rebuttal process." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.56, + 0.884, + 0.605 + ], + "angle": 0, + "content": "In this study, we present the first large-scale deployment for using LLMs to assist peer review. By making reviews more actionable and informative, we aim to enhance the peer review experience and promote a more constructive scientific process." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.628, + 0.262, + 0.646 + ], + "angle": 0, + "content": "2 Methods" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.659, + 0.884, + 0.72 + ], + "angle": 0, + "content": "In what follows, we first describe the review feedback experiment, including its goals and our technical setup with OpenReview. Next, we outline the architecture of our Review Feedback Agent and explain how the system was designed to meet our goals while ensuring a high level of reliability. In total, the agent automatically provided feedback to over 20,000 reviews at ICLR 2025." + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.738, + 0.55, + 0.757 + ], + "angle": 0, + "content": "2.1 ICLR 2025 review feedback experiment" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.763, + 0.884, + 0.853 + ], + "angle": 0, + "content": "Our pilot study was conducted in collaboration with ICLR 2025 and OpenReview. As one of the world's fastest-growing AI conferences, ICLR receives thousands of paper submissions yearly; in 2025, ICLR received 11,603 submissions. 
Each submission is assigned an average of 4 reviewers, and all reviews are standardized to include the same sections: summary, strengths, weaknesses, and questions. Furthermore, reviewers provide scores on a scale of 1 (low) to 10 (high), rating the paper according to the following categories: soundness, presentation, contribution, rating, and confidence." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.853, + 0.884, + 0.9 + ], + "angle": 0, + "content": "Goal: Our goal was to enhance review quality and, in particular, reduce low-information content reviews. Toward this goal, we identified three categories of common issues in reviews that we hoped to improve by providing LLM-generated feedback. The common issues are: 1) vague or generic critiques in reviews (the" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.506, + 0.948 + ], + "angle": 0, + "content": "2" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.121, + 0.111, + 0.135, + 0.12 + ], + "angle": 0, + "content": "A" + }, + { + "type": "image", + "bbox": [ + 0.129, + 0.131, + 0.446, + 0.36 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.462, + 0.112, + 0.472, + 0.12 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image", + "bbox": [ + 0.468, + 0.12, + 0.871, + 0.363 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.122, + 0.369, + 0.134, + 0.377 + ], + "angle": 0, + "content": "C" + }, + { + "type": "image", + "bbox": [ + 0.139, + 0.378, + 0.877, + 0.671 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.712, + 0.885, + 0.896 + ], + "angle": 0, + "content": "Figure 1: (A) Randomized controlled study setup. Before the start of the review period, we randomly assigned all submissions to one of three groups to determine how many of its reviews received feedback: none, half, or all. When a review selected to receive feedback was submitted, the agent generated and posted feedback after 1 hour. Reviewers could update their review, optionally, based on the feedback until the end of the review period, which ran from October 14 to November 12, 2024. (B) Feedback categories. Our system is designed to address three main types of review comments. Here, we provide examples of comments that would receive feedback from our agent, as well as examples of the generated feedback. (C) Review Feedback Agent. Our system consists of five LLMs (Actors, Aggregator, Critic, and Formmatter). Two parallel Actors generate the initial feedback, then pass it to the Aggregator, the Critic, and finally the Formmatter. Finally, the feedback is passed through the reliability tests; upon successfully passing, the feedback is posted on a review. We provide examples of comments and feedback given to those comments by our system." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "3" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.885, + 0.167 + ], + "angle": 0, + "content": "feedback asks the reviewers to be more specific and actionable); 2) questions or confusions that could be addressed by overlooked parts of the paper (the feedback highlights relevant sections); and 3) unprofessional statements in the review (the feedback asks the reviewer to rephrase). 
For each comment in a review, the Review Feedback Agent determined if it fell into any of these problematic categories and, if so, provided feedback on that specific review comment." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.168, + 0.884, + 0.213 + ], + "angle": 0, + "content": "Experimental setup: We set up this experiment as a Randomized Control Trial (RCT) to enable us to make causal inferences about how receiving feedback influences the peer review process. Before the beginning of the review period, we randomly split papers into one of three equal groups (see Figure 1A):" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.22, + 0.495, + 0.236 + ], + "angle": 0, + "content": "1. No reviews for this paper will receive feedback," + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.244, + 0.725, + 0.259 + ], + "angle": 0, + "content": "2. Half of the reviews for this paper will be randomly selected to receive feedback," + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.268, + 0.495, + 0.283 + ], + "angle": 0, + "content": "3. All reviews for this paper will receive feedback." + }, + { + "type": "list", + "bbox": [ + 0.132, + 0.22, + 0.725, + 0.283 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.29, + 0.885, + 0.381 + ], + "angle": 0, + "content": "For reviews randomly assigned to receive feedback, the Review Feedback Agent, wrapped in an API, was automatically triggered when a reviewer first submitted their review on OpenReview. We delayed the feedback generation by one hour after a review was initially submitted to allow reviewers time to make any small edits (e.g., typo corrections). See Figure 1A for an example timeline. The agent posted feedback to reviews through the OpenReview interface by replying to reviews with the feedback wrapped in a comment. See Figure 2 for an example of what feedback looked like on the OpenReview website." + }, + { + "type": "image", + "bbox": [ + 0.236, + 0.405, + 0.764, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.823, + 0.884, + 0.869 + ], + "angle": 0, + "content": "Figure 2: OpenReview interface. Here, we provide an example of feedback posted to a review on the OpenReview website (with consent from the reviewer). Feedback is only visible to the reviewer and the ICLR program chairs and was posted roughly one hour after the initial review was submitted." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.882, + 0.884, + 0.913 + ], + "angle": 0, + "content": "The agent only provided feedback on the initial review, and there was no subsequent interaction between the reviewer and the feedback system after that time point. The feedback is only visible to the reviewer" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "4" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.884, + 0.166 + ], + "angle": 0, + "content": "and the ICLR program chairs; it was not shared with other reviewers, authors, or area chairs and was not a factor in the acceptance decisions. Reviewers were informed that the feedback was generated by a LLM and could choose to ignore the feedback or revise their review in response, as the system did not make any direct changes. Finally, we did not access or store any identifiable information about authors or reviewers. This study was reviewed by IRB and deemed low risk." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.168, + 0.885, + 0.273 + ], + "angle": 0, + "content": "Statistics: Around \\(50\\%\\) of reviews were randomly selected to receive feedback. Of the 44,831 reviews submitted on 11,553 unique papers (we excluded desk-rejected submissions), we posted feedback to 18,946 reviews \\((42.3\\%)\\) over 4 weeks from October 15 to November 12, 2024 (see Figure 2A). Less than \\(8\\%\\) of the selected reviews did not receive feedback for one of two reasons: 2,692 reviews were originally well-written and did not need feedback, while 829 reviews had feedback that failed the reliability tests. Each review took roughly one minute to run through our entire pipeline and cost around 50 cents. On average, each review that received feedback was given 3-4 feedback comments, with a minimum of 1 and a maximum of 17." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.291, + 0.4, + 0.308 + ], + "angle": 0, + "content": "2.2 Review Feedback Agent" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.317, + 0.884, + 0.345 + ], + "angle": 0, + "content": "The Review Feedback Agent aimed to provide feedback that helped reviewers make their comments more specific, constructive, and actionable for the authors." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.347, + 0.884, + 0.451 + ], + "angle": 0, + "content": "Feedback categories: The Review Feedback Agent provided suggestions on three potential categories of issues in reviews. We curated these categories by examining reviewer guidelines from several AI conferences [19, 20, 21, 22] and evaluating previously identified patterns of \"lazy reviewer thinking\" [7]. We also took inspiration from the ARR guidelines, where 16 common reviewer heuristics are outlined [23]. Importantly, the agent was not designed to suggest new ideas to add to the review; rather, it only focused on revising the existing ideas and preventing lower-quality reviews. The target feedback areas that we ultimately focused on were:" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.465, + 0.88, + 0.494 + ], + "angle": 0, + "content": "1. Improving specificity: Encouraging reviewers to rephrase vague review comments, making them more specific, actionable, and justified for the authors." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.505, + 0.88, + 0.534 + ], + "angle": 0, + "content": "2. Addressing misunderstandings: Highlighting sections of the paper that may already address some of the reviewer's questions or confusion." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.545, + 0.88, + 0.591 + ], + "angle": 0, + "content": "3. Reducing unprofessional remarks: Identifying and addressing unprofessional or inappropriate remarks in the review. A 2019 study of 1,106 researchers found that \\(58\\%\\) had received an unprofessional review, highlighting its prevalence [24]." + }, + { + "type": "list", + "bbox": [ + 0.132, + 0.465, + 0.88, + 0.591 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.603, + 0.88, + 0.633 + ], + "angle": 0, + "content": "See Figure 1B for examples of real reviewer comments (from ICLR 2024 reviews and public journal reviews) in each category that would receive feedback and examples of feedback that would be given." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.634, + 0.88, + 0.678 + ], + "angle": 0, + "content": "Preprocessing: The agent was provided with the paper PDF's text (extracted using pypdf's PDFReader [25]) and the review text as input. 
We extracted the summary, strengths, weaknesses, and questions sections from the review. We did not provide the agent with any of the scores the reviewer initially gave the paper." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.68, + 0.885, + 0.905 + ], + "angle": 0, + "content": "Architecture: The agent generated a list of pairs, with each pair consisting of a review comment that fit into one of the problematic categories above and the corresponding feedback provided for that comment. The agent was composed of a pipeline of five LLMs (see Algorithm 1, Figure 1C). We used the Claude Sonnet 3.5 (June 20, 2024) model [11] as the backbone; we picked the backbone model by generating feedback with the same prompt using GPT-4o, Gemini 1.5 Flash, and Claude Sonnet 3.5 and then conducting a blind preference evaluation. Additionally, through testing, we found that one LLM was insufficient to generate high-quality feedback and format it correctly, thus, we instantiated the multi-call pipeline. First, we defined two parallel actor LLMs to generate the initial set of feedback based on the previously defined target areas. The actors were provided with the initial review \\((R)\\) and paper text \\((P)\\) as inputs. We used two separate actors to optimize for feedback diversity. Then, we passed the two lists of feedback, \\(F_{1}\\) and \\(F_{2}\\), to an aggregator LLM, which merged the lists into one set of feedback, \\(F_{combined}\\). Next, we passed this candidate list to a critic LLM responsible for ensuring the feedback was accurate and clear. Importantly, the critic also removed any feedback that was too superficial or nitpicky, defined through various in-context examples (see Appendix A for the examples), as we did not want to overwhelm or annoy reviewers. Finally, aformatter LLM was provided with this final list, \\(F_{filtered}\\), and formatted it into pairs:" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.504, + 0.948 + ], + "angle": 0, + "content": "5" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.138, + 0.092, + 0.414, + 0.106 + ], + "angle": 0, + "content": "- **Reviewer comment:** a comment" + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.116, + 0.557, + 0.131 + ], + "angle": 0, + "content": "- **Feedback to the reviewer:** feedback to the comment" + }, + { + "type": "list", + "bbox": [ + 0.138, + 0.092, + 0.557, + 0.131 + ], + "angle": 0, + "content": null + }, + { + "type": "code_caption", + "bbox": [ + 0.116, + 0.154, + 0.394, + 0.169 + ], + "angle": 0, + "content": "Algorithm 1 Review Feedback Agent" + }, + { + "type": "algorithm", + "bbox": [ + 0.117, + 0.17, + 0.536, + 0.337 + ], + "angle": 0, + "content": "1: Input: Paper text \\(P\\), Review \\(R\\), max attempts \\(T = 2\\) \n2: for \\(t = 1\\) to \\(T\\) do \n3: \\(F_{1} \\gets \\mathrm{Actor}_{1}(P, R)\\) \n4: \\(F_{2} \\gets \\mathrm{Actor}_{2}(P, R)\\) \n5: \\(F_{combined} \\gets \\mathrm{Aggregator}(F_{1}, F_{2})\\) \n6: \\(F_{filtered} \\gets \\mathrm{Critic}(F_{combined})\\) \n7: \\(F_{final} \\gets \\mathrm{Formatter}(F_{filtered})\\) \n8: if PassReliabilityTests(Ffinal) then return \\(F_{final}\\) \n9: end if \n10: end for \n11: return error" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.358, + 0.884, + 0.447 + ], + "angle": 0, + "content": "See Appendix A for the exact prompts used. 
To refine this system, we constructed a test set of 50 ICLR 2024 reviews we perceived to be of low quality in one or more of our target areas (i.e., they made vague comments, asked questions that were present in the paper already, and/or made unprofessional remarks). We iteratively ran our agent on this test set, examined the generated feedback, and refined the prompts to optimize the results. This procedure ultimately led to prompts that produced high-quality feedback for all 50 reviews in the test set." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.448, + 0.884, + 0.583 + ], + "angle": 0, + "content": "Reliability testing: Inspired by [26], we also developed a suite of reliability tests designed to act as guardrails, ensuring the quality of our generated feedback. Reliability tests evaluate specific attributes of a model's output. The four reliability tests we developed ensured the feedback provided constructive suggestions, addressed the reviewer, did not simply restate what the reviewer wrote, and was formatted correctly. We provide the exact reliability tests we used and examples of feedback that would fail the reliability tests in Appendix B. We developed up to five test cases for each reliability test and refined the reliability test prompts until we passed all the test cases. To refine our Review Feedback Agent's pipeline and prompts, we passed our test set reviews through the validated reliability tests until we achieved a \\(100\\%\\) pass rate." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.584, + 0.884, + 0.631 + ], + "angle": 0, + "content": "Feedback was only posted to a review if it passed all our reliability tests; if it failed, we re-ran the entire pipeline a second time (\\(T = 2\\)) to generate new feedback. Upon a second fail, we returned an error and did not post the feedback. Over \\(96\\%\\) of generated feedback for ICLR 2025 reviews passed all reliability tests." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.652, + 0.245, + 0.67 + ], + "angle": 0, + "content": "3 Results" + }, + { + "type": "title", + "bbox": [ + 0.111, + 0.684, + 0.787, + 0.703 + ], + "angle": 0, + "content": "3.1 Impact of feedback on review updates and reviewer engagement" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.709, + 0.884, + 0.754 + ], + "angle": 0, + "content": "First, we aimed to objectively measure how many reviewers updated their reviews after receiving feedback compared to those who did not receive feedback. This enabled us to assess how the feedback may have been associated with changes in various components of their review, such as length and scores." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.754, + 0.885, + 0.844 + ], + "angle": 0, + "content": "We conducted this ICLR experiment as a randomized controlled study by randomly splitting all reviews into one of two groups: not selected to receive feedback (control group) or selected to receive feedback (feedback group) - see Section 2 for more details. Note that the group selected to receive feedback includes the \\(7.9\\%\\) of reviews that were selected but did not actually receive feedback, mostly because AI deemed feedback not necessary there. This intent-to-treat definition of the feedback group enables us to conduct causal analysis but could dilute the actual effect of the feedback." 
+ }, + { + "type": "text", + "bbox": [ + 0.111, + 0.845, + 0.884, + 0.906 + ], + "angle": 0, + "content": "Of all reviews in the feedback group, we further defined reviews that successfully received feedback as either being not updated or updated. A review is not updated if a reviewer did not edit their review after receiving feedback or if the edit distance between the initial and modified review was less than 5; this edit distance filtering accounted for minor updates such as fixing typos or modifying scores. Conversely, a review" + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "6" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.158, + 0.175, + 0.171, + 0.184 + ], + "angle": 0, + "content": "A" + }, + { + "type": "image", + "bbox": [ + 0.186, + 0.178, + 0.462, + 0.527 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.477, + 0.175, + 0.488, + 0.184 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image", + "bbox": [ + 0.505, + 0.181, + 0.838, + 0.523 + ], + "angle": 0, + "content": null + }, + { + "type": "table_caption", + "bbox": [ + 0.155, + 0.553, + 0.167, + 0.562 + ], + "angle": 0, + "content": "C" + }, + { + "type": "table", + "bbox": [ + 0.173, + 0.574, + 0.825, + 0.621 + ], + "angle": 0, + "content": "
ControlFeedbackNot updatedUpdated
Average change in length7.021.0** (Δ + 200%)2.180.3***
" + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.632, + 0.884, + 0.827 + ], + "angle": 0, + "content": "Figure 3: (A) Feedback statistics. Among all ICLR 2025 reviews, 22,467 were randomly selected to receive feedback (feedback group), and 22,364 were randomly selected not to receive feedback (control group). Of those selected to receive feedback, 18,946 \\((42.3\\%)\\) successfully received feedback, with \\(26.6\\%\\) of those reviewers updating their reviews. (B) Update rates. (Top) Most reviews were submitted 2-3 days before the review deadline (November 4, 2024). (Bottom) Reviewers were more likely to update their review if they submitted it early relative to the deadline. Reviewers who received feedback were much more likely to update their reviews than those in the control group, with a difference of approximately 17 percentage points. (C) Average change in review length (measured as number of words). Review length is measured only for the following sections: summary, strengths, weaknesses, and questions. The difference in review length between the control and feedback groups is statistically significant \\((^{**}\\mathrm{p} \\leq 0.01)\\), with being selected to receive feedback leading to an average increase of 14 words more (a \\(200\\%\\) increase) in review length compared to the control group. The difference is more pronounced between the not-updated and updated groups \\((^{***}\\mathrm{p} \\leq 0.001)\\)." + }, + { + "type": "page_number", + "bbox": [ + 0.495, + 0.937, + 0.504, + 0.947 + ], + "angle": 0, + "content": "7" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.112, + 0.092, + 0.884, + 0.121 + ], + "angle": 0, + "content": "is updated if a reviewer did edit their review after receiving feedback and the edit distance between the initial and modified review was greater than 5." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.122, + 0.884, + 0.212 + ], + "angle": 0, + "content": "Of the 18,946 reviews that successfully received feedback, 5,031 (26.6%) reviews were updated (Figure 3A). Out of the 22,364 reviews in the control group, only 2,103 (9.4%) were updated; here, we define updated for the control group as a reviewer updating at least one hour after posting (the time it takes for the feedback group to receive feedback) with an edit distance greater than 5. With an update rate difference of roughly 17 percentage points (Figure 3B), we can see that reviews that received feedback were much more likely to be updated than those that did not." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.213, + 0.885, + 0.333 + ], + "angle": 0, + "content": "In Figure 3B, we also see that reviewers who submitted early relative to the deadline (November 4, 2024) were more likely to update their review than those who submitted close to or after the deadline. This suggests that more organized reviewers, who may already be more engaged in the review process, were more likely to revise their reviews in response to feedback. While this will influence our analysis comparing the not updated and updated groups, we can be confident that the underlying distribution of the control and feedback groups is similar and not biased by factors such as reviewer organization because we conducted this as an RCT. Randomization helps mitigate such biases, making it possible to assess the causal impact of the feedback on the peer review process." 
+ }, + { + "type": "text", + "bbox": [ + 0.116, + 0.334, + 0.885, + 0.591 + ], + "angle": 0, + "content": "Finally, we analyzed the change in review length (number of words in the summary, strengths, weaknesses, and questions sections) among the groups (Figure 3C). We compared the initial review length and the modified review length; we refer to modified reviews as the review at the end of the four-week review period before the rebuttal period began (only these modified reviews are made public to authors). We saw that review length, on average, increased across all groups. First, we observed that being selected to receive feedback caused the average review length to increase by about 14 words more than reviews that were not selected to receive feedback. Note that this effect size is deflated due to the substantial number of reviewers who received feedback but did not update their review, as well as the \\(7.9\\%\\) of reviews that were selected to receive feedback but did not actually get it. We also see that updating the review after receiving feedback is associated with a statistically significant increase in review length (80 words) compared to not updating the review (2 words). We can infer that reviewers who updated their reviews were editing them more consistently to incorporate more detail and nuance, explaining this large increase in length. In addition to feedback causing an increase in review length, we also found that a significantly higher percentage of reviewers who received feedback edited at least one of their scores (soundness, presentation, contribution, rating, and confidence) during the review period, with \\(8.1\\%\\) of them making edits compared to \\(7.5\\%\\) among the control group \\((p \\leq 0.05)\\). In Appendix C, we observe no significant difference in the average score changes between the feedback and control groups." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.609, + 0.681, + 0.626 + ], + "angle": 0, + "content": "3.2 Measuring how much feedback reviewers incorporate" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.634, + 0.884, + 0.678 + ], + "angle": 0, + "content": "Of the reviewers that updated their review, we wanted to measure what proportion of them incorporated one or more pieces of feedback they were provided. This analysis helped us estimate how many reviewers found the feedback useful." + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.679, + 0.885, + 0.874 + ], + "angle": 0, + "content": "We counted a piece of feedback as incorporated if the reviewer clearly integrated some part of the feedback into their modified review. To systematically carry out this analysis, we developed an LLM-based pipeline to run on all updated reviews (see Supplementary Figure S2A). We used the Claude Sonnet 3.5 model to evaluate whether each feedback item received by a reviewer was incorporated into their modified review. See Appendix D for our approach to validating this pipeline. Of the 5,031 reviews that reviewers updated, encompassing 18,322 total feedback items, \\(89\\%\\) of reviewers incorporated at least one piece of feedback. This represents \\(23.6\\%\\) of all reviewers who received feedback (Figure 4A). In total, we estimate that 12,222 feedback items were incorporated into revised reviews. We also examined the number of feedback items reviewers who updated their reviews received compared to how many feedback items they incorporated (Figure 4B). 
We see that when reviewers receive fewer feedback items, they are more likely to incorporate more (or even all) of the items. Overall, the average reviewer who updated their review incorporated \\(69.3\\%\\) of the feedback they received; in other words, given 3 pieces of feedback, the average reviewer who updated their review incorporated 2 of them." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.875, + 0.884, + 0.906 + ], + "angle": 0, + "content": "Below are a few examples of realincorporations reviewers made based on their feedback. We provide the initial review comment they posted, the feedback they received, and then their modified comment." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.937, + 0.505, + 0.948 + ], + "angle": 0, + "content": "8" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.154, + 0.093, + 0.165, + 0.103 + ], + "angle": 0, + "content": "A" + }, + { + "type": "image", + "bbox": [ + 0.154, + 0.148, + 0.44, + 0.31 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.457, + 0.093, + 0.467, + 0.102 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image_caption", + "bbox": [ + 0.518, + 0.109, + 0.826, + 0.123 + ], + "angle": 0, + "content": "Of 12,222 total feedback items incorporated:" + }, + { + "type": "image", + "bbox": [ + 0.465, + 0.139, + 0.828, + 0.345 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.367, + 0.885, + 0.473 + ], + "angle": 0, + "content": "Figure 4: (A) Overall incorporation statistics. Through our LLM-based incorporation analysis, we estimate that \\(23.6\\%\\) of reviewers who were given feedback incorporated at least one feedback item they were given. This means that \\(89\\%\\) of reviewers who updated their review after receiving feedback incorporated at least one item. (B) Feedback incorporation trends. Here, we illustrate the relationship between the number of feedback items reviewers who updated their review received and how many of those items they incorporated. In total, reviewers incorporated 12,222 feedback items. Notably, reviewers were more likely to incorporate feedback when given fewer items." + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.5, + 0.576, + 0.516 + ], + "angle": 0, + "content": "Incorporation example: Feedback that encourages specificity" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.527, + 0.859, + 0.557 + ], + "angle": 0, + "content": "Original comment: Theorems 1, 2, 3 require the notion of interpolation, which is considered a strong assumption to have." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.57, + 0.859, + 0.645 + ], + "angle": 0, + "content": "Feedback: To make this critique more actionable, it would be helpful to elaborate on why the interpolation assumption is problematic in this context. Are there particular types of federated learning problems where this assumption is unlikely to hold? Providing concrete examples or suggestions for how the authors could relax or justify this assumption would give them clearer guidance on addressing this limitation." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.658, + 0.859, + 0.81 + ], + "angle": 0, + "content": "Revised comment: Theorems 1, 2, 3 require the notion of interpolation. Even though an explanation of regimes that satisfy this condition is provided, considering that there are previous works [1], [2] that extend beyond that setting, this assumption seems to be an avenue for future work in this field. 
More specifically, the initial FedProx algorithm [1] is analyzed in the general non-interpolated setting. In addition, the follow-up work regarding the FedExProx algorithm [2] considers in the main paper the interpolated regime. However, the authors provide additionally an illustration of the algorithm's behaviour in the non-interpolated setting (see Appendix F.3 in [2]). In that sense, it would be useful to provide some additional details on the behaviour of the algorithm in the non-interpolated setting or to comment on the main challenges in extending the current proof technique beyond the interpolation framework, offering in that way a more complete picture and direction for future research." + }, + { + "type": "page_number", + "bbox": [ + 0.494, + 0.936, + 0.505, + 0.948 + ], + "angle": 0, + "content": "9" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.14, + 0.093, + 0.612, + 0.108 + ], + "angle": 0, + "content": "Incorporation example: Feedback that identifies content oversight" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.12, + 0.63, + 0.135 + ], + "angle": 0, + "content": "Original reviewer comment: How is the performance on Llama?" + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.148, + 0.859, + 0.193 + ], + "angle": 0, + "content": "Feedback to the reviewer: The paper appears to provide results on Llama 2-7B in Table 3. Does this address your question? If not, could you clarify what specific aspects of performance on Llama you're interested in that aren't covered by the current results?" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.206, + 0.816, + 0.221 + ], + "angle": 0, + "content": "Modified reviewer comment: How is the performance on Llama3 with the alpaca dataset?" + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.243, + 0.636, + 0.258 + ], + "angle": 0, + "content": "Incorporation example: Feedback that makes review more actionable" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.27, + 0.857, + 0.301 + ], + "angle": 0, + "content": "Original reviewer comment: Limited application focus on other visual tasks (e.g., object detection, segmentation) beyond classification." + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.313, + 0.859, + 0.373 + ], + "angle": 0, + "content": "Feedback to the reviewer: To make this comment more actionable, consider asking the authors to discuss potential extensions or challenges in applying their approach to other visual tasks. For example: 'How might the visual memory approach be adapted for tasks like object detection or segmentation? What unique challenges would arise in those domains?'" + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.386, + 0.859, + 0.432 + ], + "angle": 0, + "content": "Modified reviewer comment: Limited application focus on other visual tasks (e.g., object detection, segmentation) beyond classification. How might the visual memory approach be adapted for tasks like object detection or segmentation? What unique challenges would arise in those domains?" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.449, + 0.884, + 0.572 + ], + "angle": 0, + "content": "Finally, we wanted to assess whether these incorporated reviews were clearer, more specific, and more actionable for authors. To conduct this analysis, we asked two human AI researchers to conduct a blind preference evaluation between the initial and modified pre-rebuttal reviews. 
Specifically, we focused on reviews in the updated group that received 3-4 feedback items (the average number given) where the proportion of incorporated feedback exceeded 0.60. This threshold was chosen because the average incorporation rate was \\(67\\%\\), and we aimed to assess whether an average updated review with incorporated feedback was perceived as an improvement. Human annotators preferred modified reviews \\(89\\%\\) of the time (out of 100 examples), indicating that reviewers who incorporated feedback consistently produced higher-quality reviews." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.589, + 0.63, + 0.604 + ], + "angle": 0, + "content": "3.3 Influence of feedback on rebuttals and decisions" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.614, + 0.884, + 0.675 + ], + "angle": 0, + "content": "We next analyzed the impact of being selected to receive feedback on the rebuttal process and decision outcomes. The rebuttal period took place over three weeks between November 12 and December 4, 2024, and was a time when authors could respond to their reviewer's comments as they revised their papers. We examined how the feedback causally impacted different engagement measures during the rebuttal period." + }, + { + "type": "table", + "bbox": [ + 0.137, + 0.686, + 0.855, + 0.749 + ], + "angle": 0, + "content": "
 | Control | Feedback | Not updated | Updated
Average length of author rebuttal | 807 | 855*** (Δ + 6%) | 840 | 896***
Average length of reviewer replies | 110 | 116*** (Δ + 5.5%) | 115 | 129***
" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.759, + 0.885, + 0.85 + ], + "angle": 0, + "content": "Table 1: Average change in rebuttal and reply length (measured as number of words). We observe that being selected to receive feedback causally increased the length of author rebuttals by an average of 48 words \\((6\\%;^{**}\\mathrm{p}\\leq 0.001)\\) for reviews written by reviewers who were selected to receive feedback, compared to those who were not. We also see that the average length of reviewer replies to author rebuttals is significantly longer among those who were selected to receive feedback, with an average increase of 6 words \\((5.5\\%;^{**}\\mathrm{p}\\leq 0.001)\\)." + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.865, + 0.884, + 0.911 + ], + "angle": 0, + "content": "In the first row of Table 1, we observed that authors posted rebuttals that were, on average, \\(6\\%\\) longer (48 words) to reviews written by reviewers who were selected to receive feedback, which is significantly longer than those posted to reviews in the control group. In other words, authors were generally more engaged when" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "10" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.884, + 0.182 + ], + "angle": 0, + "content": "their reviewer was selected to receive feedback. This could be because the feedback led to clearer and more actionable reviews, allowing authors to more effectively address and respond to the reviewer's comments with more detailed rebuttals. In the second row of Table 1, we also saw that reviewers who were selected to receive feedback responded to these rebuttals with replies that were, on average, \\(5.5\\%\\) longer (6 words) than those who were not selected, again highlighting increased engagement among reviewers if they were in the feedback group." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.183, + 0.884, + 0.272 + ], + "angle": 0, + "content": "This increased engagement is reflected in the percentage of reviewers who edited one or more of their scores for a paper during the rebuttal period. We found that \\(31.7\\%\\) of reviewers who received feedback edited their scores, compared to \\(30.6\\%\\) of those who did not, consistent with receiving feedback being associated with greater reviewer-author engagement. Overall, these findings lead us to conclude that authors were better able to address their reviewers' original concerns during the rebuttal period if their reviewer was selected to receive feedback, leading to more engagement and satisfaction among both groups." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.273, + 0.885, + 0.395 + ], + "angle": 0, + "content": "Finally, we evaluated whether papers with reviews that were selected to receive feedback had a different acceptance rate than those that were not. We compared the acceptance rates of the control and feedback groups, defining the control group as all papers where no reviews were selected to receive feedback and the feedback group as those where at least one review was selected to receive feedback. While there was a slightly higher acceptance rate of \\(32.3\\%\\) among papers in the feedback group, compared to \\(30.8\\%\\) among the control group, this difference was not statistically significant. This indicates that while receiving feedback promoted more engaged and thorough discussions among reviewers and authors, it did not substantially change acceptance rates." 
+ }, + { + "type": "title", + "bbox": [ + 0.112, + 0.411, + 0.609, + 0.429 + ], + "angle": 0, + "content": "3.4 Clustering analysis of the feedback comments" + }, + { + "type": "image_caption", + "bbox": [ + 0.118, + 0.45, + 0.133, + 0.461 + ], + "angle": 0, + "content": "A" + }, + { + "type": "image", + "bbox": [ + 0.116, + 0.491, + 0.378, + 0.6 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.373, + 0.461, + 0.585, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.587, + 0.451, + 0.597, + 0.461 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image", + "bbox": [ + 0.607, + 0.464, + 0.876, + 0.617 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.66, + 0.884, + 0.781 + ], + "angle": 0, + "content": "Figure 5: (A) Feedback clusters. We used an LLM to group all the feedback items we provided to reviewers into five distinct clusters based on the text. We found that nearly half of the feedback was directed at asking the reviewer to 'clarify methodological concerns to make their request specific and actionable.' The next most popular cluster was feedback asking the reviewer to 'clarify their request by adding specific analyses, baselines, or references.' (B) Incorporation rate by cluster. We measured the percentage of feedback items within each cluster that reviewers incorporated. Overall, \\(17.7\\%\\) of all feedback was incorporated. When examined by cluster, incorporation rates ranged from \\(14\\%\\) to \\(18\\%\\), with no statistically significant differences observed." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.792, + 0.885, + 0.913 + ], + "angle": 0, + "content": "To gain more insights into what types of feedback were provided by the AI agent, we present a quantitative clustering analysis of the feedback generated. Of the 69,836 total feedback items the agent gave to reviewers, we characterized the feedback by generating distinct clusters. Inspired by [27, 28], we generated five clusters on 1000 randomly sampled feedback items using an LLM; we repeated this process twice to ensure robustness. Of the 10 candidate clusters, we prompted a second LLM to select five non-overlapping clusters, giving us the five clusters we present below. Finally, we prompted a third LLM to assign the feedback items to one of the five clusters so we could determine what proportion of feedback belonged to each cluster. We depict the five clusters in Figure 5A, as well as examples of feedback items that would fall into each cluster in Table 2." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "11" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.115, + 0.088, + 0.921, + 0.665 + ], + "angle": 0, + "content": "
Cluster | Reviewer comment | Feedback to the reviewer
Clarify methodological concerns to make request specific and actionable | The experimental setup and tool flow used to conduct the experiments should be described in more detail. | To make this feedback more actionable, it would be helpful to specify what aspects of the experimental setup and tool flow you feel are missing. For example, are you looking for more details on the hardware specifications used for CPU/GPU/FPGA experiments, software frameworks and versions, training hyperparameters, or quantization settings?
Clarify request by adding specific analyses, baselines, or references | All the experiment examples are numerical and toy, and there is a large mismatch with real physical engineering systems. Therefore, I do not think they are convincing. | To make this critique more constructive, you could suggest specific real-world physical systems or benchmarks that would provide a more convincing evaluation of the method. This would give the authors concrete ideas for improving their experimental validation.
Clarify suggestions to visuals or format | A few suggestions, Fig. 2 is very hard to understand. Is there another way to convey the same information which is easier to read? | To make this feedback more constructive, try to articulate what specific aspects of Figure 2 are difficult to understand. You could suggest alternative visualization formats or specific elements that need clarification. This would help the authors know how to improve the figure.
Request authors expand on methodological limitations or broader implications | CSP can only support binary labels. | To make this comment more constructive, you could ask the authors to discuss the implications of this limitation for real-world applications and whether they have considered any extensions to support multi-class problems.
Clarify desired improvements to theoretical analysis or proofs | How standard are the assumptions (Assumptions C.1 and C.2) required for proving Theorems 4.1 and 4.2? It would be helpful to see more justification regarding their validity (e.g. other papers building based upon similar assumptions). | This is an excellent question that probes the theoretical foundations of the paper. To make it more actionable, you could ask the authors to provide specific examples of other works in the field that rely on similar assumptions, or to explain how these assumptions compare to those typically used in related theoretical analyses. This would help contextualize the theoretical contributions within the broader literature.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.112, + 0.68, + 0.884, + 0.711 + ], + "angle": 0, + "content": "Table 2: Examples of AI-generated feedback that belong to each of the five main clusters. We also provide the original review comment that triggered the generation of the feedback." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.736, + 0.884, + 0.827 + ], + "angle": 0, + "content": "These clusters indicate that the vast majority of feedback provided was addressed towards vague review comments and aimed to make them more specific, actionable, and justified. We saw that the agent rarely chose to comment on content misunderstandings, in large part because it had to be absolutely certain there was an error and provide a direct quote from the paper highlighting the mistake as we did not tolerate any hallucinations. Therefore, we saw that the model would err on the side of caution and not provide many comments related to that category." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.827, + 0.885, + 0.903 + ], + "angle": 0, + "content": "We also sought to measure the percentage of feedback items within each cluster that were incorporated by reviewers, as shown in Figure 5B. Overall, out of the 69,836 feedback items given, we found that \\(17.7\\%\\) of all feedback was incorporated. On a cluster basis, we found that the 'clarify request by adding specific analyses, baselines, or references' and 'clarify desired improvements to theoretical analysis or proofs' clusters had the highest incorporation rate at \\(18\\%\\). The 'clarify suggestions to visuals or format' cluster had the" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.51, + 0.948 + ], + "angle": 0, + "content": "12" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.887, + 0.138 + ], + "angle": 0, + "content": "lowest incorporation rate at \\(14\\%\\). Overall, we do not see statistically significant differences in incorporation rates among the clusters, implying that reviewers did not find certain categories of feedback to be more or less useful than others." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.16, + 0.333, + 0.181 + ], + "angle": 0, + "content": "4 Related Works" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.191, + 0.885, + 0.282 + ], + "angle": 0, + "content": "Due to their extensive capabilities, LLMs are being used across every stage of the peer review process. Reviewers increasingly use LLMs to assist in drafting peer reviews [15, 29, 30]. An estimated \\(17.5\\%\\) of authors of Computer Science abstracts on arXiv [31] and \\(10.6\\%\\) of reviewers at ICLR 2024 [16] used LLMs for writing assistance. Other studies have shown the potential of LLMs to make the entire review pipeline more efficient across various stages [32, 33, 34, 35] such as writing manuscripts [36], initial quality control [37, 38, 27], and even providing AI-generated instructions for how to write reviews [39]." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.282, + 0.887, + 0.387 + ], + "angle": 0, + "content": "As peer review workloads continue to increase, LLMs present an opportunity to alleviate some of the burden on human reviewers by providing reviews of submitted manuscripts. In a prospective survey study, 308 researchers from 110 institutions received GPT-4-generated feedback on their papers. Of these, \\(57.4\\%\\) found the feedback helpful, and \\(82.4\\%\\) felt it was more useful than the feedback provided by at least some human reviewers [12]. 
Building off of this work, [40] proposed a multi-agent review generation system that improved the specificity and helpfulness of feedback provided compared to GPT-4, reducing the rate of generic comments from \\(60\\%\\) to \\(29\\%\\)." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.387, + 0.885, + 0.493 + ], + "angle": 0, + "content": "Furthermore, LLMs offer an efficient and possibly less biased alternative to human evaluations; [41] found that human evaluators of peer reviews were highly susceptible to bias from review length and paper score, as there were high levels of subjectivity among reviewers. These findings suggest that integrating LLMs into the review evaluation process could standardize assessments and reduce inconsistencies. As LLM-based tools continue to evolve, they hold the potential to improve both the speed and quality of manuscript evaluations. Our experiment is the first to demonstrate how LLMs can improve the peer review process on a large scale, highlighting their practical benefits." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.493, + 0.887, + 0.631 + ], + "angle": 0, + "content": "However, despite these advancements, no prior studies had specifically examined how LLMs could be used to provide feedback on peer reviews in the areas we focused on in our experiment. A study released after our ICLR experiment, however, introduced a benchmark to identify toxicity in peer reviews [42]. The authors identified four categories of toxic comments: using emotive or sarcastic language, vague or overly critical feedback, personal attacks, and excessive negativity. These categories align closely with the ones we chose for our agent to provide feedback on. The authors benchmarked several LLMs for detecting toxicity and tested their ability to revise toxic sentences, finding that human evaluators preferred \\(80\\%\\) of these revisions. In future iterations of our Review Feedback Agent, this benchmark could offer a valuable tool for testing our pipeline's ability to detect toxicity and offer constructive feedback." + }, + { + "type": "title", + "bbox": [ + 0.113, + 0.652, + 0.281, + 0.673 + ], + "angle": 0, + "content": "5 Discussion" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.683, + 0.885, + 0.803 + ], + "angle": 0, + "content": "Our research demonstrates the significant potential of LLM-based systems to enhance peer review quality at scale. By providing targeted feedback to reviewers at ICLR 2025, we observed meaningful improvements in review specificity, engagement, and actionability. We saw that \\(27\\%\\) of reviewers updated their reviews, and an overwhelming majority of those who made updates incorporated at least one piece of feedback into their modifications. Blinded AI researchers found the updated reviews to be consistently more clear and informative. Furthermore, feedback intervention led to increased engagement throughout the review process, with longer reviews, rebuttals, and reviewer responses, suggesting more involved discussions between authors and reviewers." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.804, + 0.887, + 0.911 + ], + "angle": 0, + "content": "We designed the AI feedback system to enhance reviews while ensuring human reviewers retain complete control. First, the AI-generated feedback was purely optional, and reviewers could decide whether to incorporate it or not; by default, they could opt out by ignoring the feedback. Second, human reviewers had full control over the final review and the scores visible to the authors. 
To reduce the risk of hallucination, the AI feedback had to pass several rigorous reliability tests before being shared with reviewers. Finally, no personal or identifiable information about reviewers or authors was disclosed to the agent. An IRB review deemed the system to be low risk." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.511, + 0.949 + ], + "angle": 0, + "content": "13" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.111, + 0.092, + 0.884, + 0.242 + ], + "angle": 0, + "content": "Going forward, there are several directions to further improve the Review Feedback Agent. Our feedback categories focused on three main areas (improving specificity, addressing misunderstandings, and ensuring professionalism). While these categories were derived from reviewer guides and previous studies and encompass the majority of author complaints, they may not capture all aspects of review quality. Expanding to other categories would be helpful. Additionally, it would be interesting to explore the use of reasoning models to generate more nuanced feedback for complex issues in reviews. Finally, the concept of developing reliability tests for LLMs is an evolving field, with new studies emerging after our experiment [43, 44], and we hope to incorporate ideas from these recent works to improve the robustness of our framework. Ultimately, we expect that running this agent at future AI conferences across a diverse range of research topics will improve its robustness and effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.243, + 0.885, + 0.381 + ], + "angle": 0, + "content": "CS conferences have long leveraged machine learning to enhance their peer review processes. One early example is the Toronto Paper Matching algorithm, which was used in NIPS 2010 to match papers with reviewers and has since been deployed by over 50 conferences [45]. However, the impact of many of these earlier applications of machine learning has not been rigorously quantified. To address this gap, we were motivated to conduct this randomized controlled study to rigorously evaluate the effects of review feedback before broader deployment. Our findings show that by striving to make reviews more informative for authors, the Review Feedback Agent has the potential to enhance the overall quality of scientific communication. As LLM capabilities continue to advance, we anticipate even more advanced systems that can provide tailored feedback to reviewers, ultimately benefiting the entire scientific community through improved peer review." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.401, + 0.338, + 0.421 + ], + "angle": 0, + "content": "Acknowledgements" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.432, + 0.884, + 0.495 + ], + "angle": 0, + "content": "We would like to thank Celeste Martínez and Carlos Mondragon Chapa at OpenReview for their help in integrating our agent into the OpenReview interface. We would also like to thank Alex Tamkin and Anthropic for helping us increase our rate limits. Finally, we would like to thank members of the Zou group for their support and comments on this work." + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.515, + 0.373, + 0.535 + ], + "angle": 0, + "content": "Author Contributions" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.546, + 0.884, + 0.593 + ], + "angle": 0, + "content": "NT, MY, JS, and JZ designed, developed, and deployed the Review Feedback Agent, conducted analyses, and wrote the paper. 
AG, NP, FS, RY, and CV are program chairs of ICLR 2025 and provided guidance on the feedback study and analysis." + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.614, + 0.245, + 0.634 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.645, + 0.884, + 0.677 + ], + "angle": 0, + "content": "[1] Bruce Alberts, Brooks Hanson, and Katrina L. Kelner. Editorial: Reviewing peer review. Science, 321(5885):15-15, 2008." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.685, + 0.885, + 0.716 + ], + "angle": 0, + "content": "[2] Jacalyn Kelly, Tara Sadeghieh, and Khosrow Adeli. Peer review in scientific publications: benefits, critiques, & a survival guide. *Ejifcc*, 25(3):227, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.724, + 0.5, + 0.74 + ], + "angle": 0, + "content": "[3] Publons. Global state of peer review 2018, 2018." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.748, + 0.885, + 0.779 + ], + "angle": 0, + "content": "[4] Ariful Azad and Afeefa Banu. Publication trends in artificial intelligence conferences: The rise of super prolific authors, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.788, + 0.885, + 0.833 + ], + "angle": 0, + "content": "[5] Alison McCook. Is peer review broken? submissions are up, reviewers are overtaxed, and authors are lodging complaint after complaint about the process at top-tier journals. what's wrong with peer review?, 2006." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.843, + 0.408, + 0.859 + ], + "angle": 0, + "content": "[6] ICLR. Iclr 2024 press release, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.867, + 0.885, + 0.913 + ], + "angle": 0, + "content": "[7] Anna Rogers and Isabelle Augenstein. What can we do to improve peer review in NLP? In Trevor Cohn, Yulan He, and Yang Liu, editors, Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1256–1262, Online, November 2020. Association for Computational Linguistics." + }, + { + "type": "list", + "bbox": [ + 0.124, + 0.645, + 0.885, + 0.913 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.511, + 0.948 + ], + "angle": 0, + "content": "14" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.123, + 0.09, + 0.885, + 0.138 + ], + "angle": 0, + "content": "[8] Anna Rogers, Marzena Karpinska, Jordan Boyd-Graber, and Naoaki Okazaki. Program chairs' report on peer review at acl 2023. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages x1-lxxv, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.124, + 0.146, + 0.71, + 0.163 + ], + "angle": 0, + "content": "[9] Martijn Arns. Open access is tiring out peer reviewers. Nature, 515:467, 2014." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.171, + 0.883, + 0.204 + ], + "angle": 0, + "content": "[10] Corinna Cortes and Neil D. Lawrence. Inconsistency in conference peer review: Revisiting the 2014 neurips experiment, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.211, + 0.41, + 0.228 + ], + "angle": 0, + "content": "[11] Anthropic. Claude 3.5 sonnet, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.236, + 0.884, + 0.284 + ], + "angle": 0, + "content": "[12] Weixin Liang, Yuhui Zhang, Hancheng Cao, Binglu Wang, Daisy Yi Ding, Xinyu Yang, Kailas Vodra-halli, Siyu He, Daniel Scott Smith, Yian Yin, et al. 
Can large language models provide useful feedback on research papers? a large-scale empirical analysis. NEJM AI, 1(8):AIoa2400196, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.292, + 0.882, + 0.324 + ], + "angle": 0, + "content": "[13] Mert Yuksekgonul, Federico Bianchi, Joseph Boen, Sheng Liu, Zhi Huang, Carlos Guestrin, and James Zou. Textgrad: Automatic \"differentiation\" via text, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.332, + 0.882, + 0.394 + ], + "angle": 0, + "content": "[14] Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.403, + 0.882, + 0.449 + ], + "angle": 0, + "content": "[15] Mohammad Hosseini and Serge P J M Horbach. Fighting reviewer fatigue or amplifying bias? considerations and recommendations for use of chatgpt and other large language models in scholarly peer review. Research Integrity and Peer Review, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.457, + 0.882, + 0.505 + ], + "angle": 0, + "content": "[16] Weixin Liang, Zachary Izzo, Yaohui Zhang, Haley Lepp, Hancheng Cao, Xuandong Zhao, Lingjiao Chen, Haotian Ye, Sheng Liu, Zhi Huang, Daniel A. McFarland, and James Y. Zou. Monitoring ai-modified content at scale: A case study on the impact of chatgpt on ai conference peer reviews, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.513, + 0.882, + 0.56 + ], + "angle": 0, + "content": "[17] Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, et al. Siren's song in the ai ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.569, + 0.882, + 0.614 + ], + "angle": 0, + "content": "[18] Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. Instruction-following evaluation for large language models. arXiv preprint arXiv:2311.07911, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.624, + 0.628, + 0.641 + ], + "angle": 0, + "content": "[19] ICML 2023 program committee. Icml 2023 reviewer tutorial, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.649, + 0.833, + 0.667 + ], + "angle": 0, + "content": "[20] ICML 2022 Program Chairs. How to be a good reviewer? reviewer tutorial for icml 2022, 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.675, + 0.533, + 0.691 + ], + "angle": 0, + "content": "[21] ACL PC Chairs. Last minute reviewing advice, 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.699, + 0.882, + 0.73 + ], + "angle": 0, + "content": "[22] Matias Valdenegro. Lxcv @ cvpr 2021 reviewer mentoring program: And how to write good reviews, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.74, + 0.61, + 0.757 + ], + "angle": 0, + "content": "[23] Isabelle Augenstein Anna Rogers. Arr reviewer guidelines, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.765, + 0.882, + 0.796 + ], + "angle": 0, + "content": "[24] Nyssa J Silbiger and Amber D Stubler. Unprofessional peer reviews disproportionately harm underrepresented groups in stem. PeerJ, 7:e8247, 2019." 
+ }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.805, + 0.882, + 0.838 + ], + "angle": 0, + "content": "[25] Mathieu Fenniak, Matthew Stamy, pubpub zz, Martin Thoma, Matthew Peveler, exiledkingcc, and pypdf Contributors. The pypdf library, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.846, + 0.882, + 0.876 + ], + "angle": 0, + "content": "[26] Marco Tulio Ribeiro and Scott Lundberg. Testing language models (and prompts) like we test software, 2023." + }, + { + "type": "list", + "bbox": [ + 0.116, + 0.09, + 0.885, + 0.876 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "15" + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.09, + 0.883, + 0.138 + ], + "angle": 0, + "content": "[27] Alexander Goldberg, Ihsan Ullah, Thanh Gia Hieu Khuong, Benedictus Kent Rachmat, Zhen Xu, Isabelle Guyon, and Nihar B. Shah. Usefulness of llms as an author checklist assistant for scientific papers: Neurips'24 experiment, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.115, + 0.145, + 0.885, + 0.209 + ], + "angle": 0, + "content": "[28] Alex Tamkin, Miles McCain, Kunal Handa, Esin Durmus, Liane Lovitt, Ankur Rathi, Saffron Huang, Alfred Mountfield, Jerry Hong, Stuart Ritchie, Michael Stern, Brian Clarke, Landon Goldberg, Theodore R. Sumers, Jared Mueller, William McEachen, Wes Mitchell, Shan Carter, Jack Clark, Jared Kaplan, and Deep Ganguli. Clio: Privacy-preserving insights into real-world ai use, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.215, + 0.882, + 0.249 + ], + "angle": 0, + "content": "[29] Ryan Liu and Nihar B. Shah. Reviewergpt? an exploratory study on using large language models for paper reviewing, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.255, + 0.882, + 0.289 + ], + "angle": 0, + "content": "[30] Som Biswas, Dushyant Dobaria, and Harris L. Cohen. Chatgpt and the future of journal reviews: A feasibility study. The Yale Journal of Biology and Medicine, 96(3):415-420, 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.296, + 0.882, + 0.344 + ], + "angle": 0, + "content": "[31] Weixin Liang, Yaohui Zhang, Zhengxuan Wu, Haley Lepp, Wenlong Ji, Xuandong Zhao, Hancheng Cao, Sheng Liu, Siyu He, Zhi Huang, Diyi Yang, Christopher Potts, Christopher D Manning, and James Y. Zou. Mapping the increasing use of llms in scientific papers, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.351, + 0.882, + 0.384 + ], + "angle": 0, + "content": "[32] Nihar B. Shah. Challenges, experiments, and computational solutions in peer review. Commun. ACM, 65(6):76-87, May 2022." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.391, + 0.882, + 0.425 + ], + "angle": 0, + "content": "[33] Simon Price and Peter A. Flach. Computational support for academic peer review: a perspective from artificial intelligence. *Commun. ACM*, 60(3):70-79, February 2017." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.432, + 0.885, + 0.464 + ], + "angle": 0, + "content": "[34] Atreyi Kankanhalli. Peer review in the age of generative ai. Journal of the Association for Information Systems, 25(1), 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.472, + 0.882, + 0.55 + ], + "angle": 0, + "content": "[35] Ilia Kuznetsov, Osama Mohammed Afzal, Koen Dercksen, Nils Dycke, Alexander Goldberg, Tom Hope, Dirk Hovy, Jonathan K. 
Kummerfeld, Anne Lauscher, Kevin Leyton-Brown, Sheng Lu, Mausam, Margot Mieskes, Aurélie Néveol, Danish Pruthi, Lizhen Qu, Roy Schwartz, Noah A. Smith, Thamar Solorio, Jingyan Wang, Xiaodan Zhu, Anna Rogers, Nihar B. Shah, and Iryna Gurevych. What can natural language processing do for peer review?, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.558, + 0.882, + 0.605 + ], + "angle": 0, + "content": "[36] Tiffany I Leung, Taiane de Azevedo Cardoso, Amaryllis Mavragani, and Gunther Eysenbach. Best practices for using ai tools as an author, peer reviewer, or editor. J Med Internet Res, 25:e51584, Aug 2023." + }, + { + "type": "ref_text", + "bbox": [ + 0.117, + 0.613, + 0.882, + 0.646 + ], + "angle": 0, + "content": "[37] Alessandro Checco, Lorenzo Bracciale, Pierpaolo Loreti, Stephen Pinfield, and Giuseppe Bianchi. AI-assisted peer review. Humanities and Social Sciences Communications, 2021." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.653, + 0.882, + 0.687 + ], + "angle": 0, + "content": "[38] Kayvan Kousha and Mike Thelwall. Artificial intelligence to support publishing and peer review: A summary and review. Learned Publishing, 37(1):4-12, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.693, + 0.882, + 0.786 + ], + "angle": 0, + "content": "[39] Xiaotian Su, Thiemo Wambsgangss, Roman Rietsche, Seyed Parsa Neshaei, and Tanja Käser. Reviewwriter: AI-generated instructions for peer review writing. In Ekaterina Kochmar, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Nitin Madnani, Anaïs Tack, Victoria Yaneva, Zheng Yuan, and Torsten Zesch, editors, Proceedings of the 18th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2023), pages 57–71, Toronto, Canada, July 2023. Association for Computational Linguistics." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.794, + 0.882, + 0.827 + ], + "angle": 0, + "content": "[40] Mike D'Arcy, Tom Hope, Larry Birnbaum, and Doug Downey. Marg: Multi-agent review generation for scientific papers, 2024." + }, + { + "type": "ref_text", + "bbox": [ + 0.116, + 0.834, + 0.882, + 0.88 + ], + "angle": 0, + "content": "[41] Alexander Goldberg, Ivan Stelmakh, Kyunghyun Cho, Alice Oh, Alekh Agarwal, Danielle Belgrave, and Nihar B. Shah. Peer reviews of peer reviews: A randomized controlled trial and other experiments, 2024." + }, + { + "type": "list", + "bbox": [ + 0.115, + 0.09, + 0.885, + 0.88 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "16" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.115, + 0.091, + 0.88, + 0.135 + ], + "angle": 0, + "content": "[42] Man Luo, Bradley Peterson, Rafael Gan, Hari Ramalingame, Navya Gangrade, Ariadne Dimarogona, Imon Banerjee, and Phillip Howard. Benchmark on peer review toxic detection: A challenging task with a new dataset, 2025." + }, + { + "type": "text", + "bbox": [ + 0.115, + 0.144, + 0.88, + 0.187 + ], + "angle": 0, + "content": "[43] Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests, 2024." + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.197, + 0.88, + 0.225 + ], + "angle": 0, + "content": "[44] Archiki Prasad, Elias Stengel-Eskin, Justin Chih-Yao Chen, Zaid Khan, and Mohit Bansal. Learning to generate unit tests for automated debugging, 2025." 
+ }, + { + "type": "text", + "bbox": [ + 0.114, + 0.234, + 0.878, + 0.263 + ], + "angle": 0, + "content": "[45] Laurent Charlin, Richard S Zemel, and Craig Boutilier. A framework for optimizing paper matching In UAI, volume 11, pages 86-95, 2011." + }, + { + "type": "list", + "bbox": [ + 0.114, + 0.091, + 0.88, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.115, + 0.3, + 0.31, + 0.328 + ], + "angle": 0, + "content": "Appendices" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.345, + 0.342, + 0.366 + ], + "angle": 0, + "content": "A Agent Prompts" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.375, + 0.882, + 0.405 + ], + "angle": 0, + "content": "We manually fine-tuned the following prompts for the LLMs in the Review Feedback Agent. We provide the prompts below:" + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.418, + 0.244, + 0.432 + ], + "angle": 0, + "content": "Actor Prompt" + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.444, + 0.856, + 0.473 + ], + "angle": 0, + "content": "Here is the paper: {paper} . Here is the peer review: {review} ." + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.495, + 0.3, + 0.511 + ], + "angle": 0, + "content": "Actor System Prompt" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.522, + 0.857, + 0.581 + ], + "angle": 0, + "content": "You are given a peer review of a machine learning paper submitted to a top-tier ML conference on OpenReview. Your task is to provide constructive feedback to the reviewer so that it becomes a high-quality review. You will do this by evaluating the review against a checklist and providing specific feedback about where the review fails." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.584, + 0.388, + 0.598 + ], + "angle": 0, + "content": "Here are step-by-step instructions:" + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.608, + 0.748, + 0.622 + ], + "angle": 0, + "content": "1. Read the text of the review and the paper about which the review was written." + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.633, + 0.461, + 0.647 + ], + "angle": 0, + "content": "2. Evaluate every comment in the review:" + }, + { + "type": "list", + "bbox": [ + 0.159, + 0.608, + 0.748, + 0.647 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.658, + 0.856, + 0.687 + ], + "angle": 0, + "content": "- Focus on comments related to weaknesses of the paper or questions the reviewer has. Ignore any comments that are summaries of the paper or that discuss strengths of the paper." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.692, + 0.856, + 0.721 + ], + "angle": 0, + "content": "- Consider the reviewer's comments in their entirety. Make sure you read all sentences related to one thought, since the full context of the reviewer's comment is very important." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.728, + 0.856, + 0.757 + ], + "angle": 0, + "content": "- Consider the reviewer's comments in their entirety. Make sure you read all sentences related to one thought, since the full context of the reviewer's comment is very important." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.763, + 0.856, + 0.807 + ], + "angle": 0, + "content": "- For each comment, evaluate it against the following checklist. Follow the examples for how to respond. Importantly, you should be as helpful as possible. Do no ask superficial questions or make superficial remarks, think deeply and exhibit your understanding." 
+ }, + { + "type": "text", + "bbox": [ + 0.2, + 0.812, + 0.856, + 0.841 + ], + "angle": 0, + "content": "- Most reviewer comments are already sufficiently clear and actionable. Only focus on the ones that clearly fail the checklist items below." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.848, + 0.856, + 0.896 + ], + "angle": 0, + "content": "- Checklist: \n(a) Check if the reviewer requests something obviously present in the paper. Only respond if certain of the reviewer's error. If so, politely pose a question to the reviewer with" + }, + { + "type": "list", + "bbox": [ + 0.2, + 0.658, + 0.856, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "17" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.245, + 0.1, + 0.857, + 0.144 + ], + "angle": 0, + "content": "something like \"Does the following answer your question...?\" quote the relevant paper section verbatim using tags. Use only exact quotes and do not comment if uncertain." + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.145, + 0.857, + 0.176 + ], + "angle": 0, + "content": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.179, + 0.359, + 0.193 + ], + "angle": 0, + "content": "- Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.196, + 0.856, + 0.226 + ], + "angle": 0, + "content": "* Reviewer comment: In Figure 4, the efficiency experiments have no results for Transformer models, which is a key limitation of the paper." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.229, + 0.856, + 0.274 + ], + "angle": 0, + "content": "* Feedback to the reviewer: Does Figure 5 of the paper answer your question? In particular: In Transformers, the proposed technique provides \\(25\\%\\) relative improvement in wall-clock time (Figure 5) ." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.196, + 0.856, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.277, + 0.359, + 0.291 + ], + "angle": 0, + "content": "- Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.294, + 0.856, + 0.37 + ], + "angle": 0, + "content": "* Reviewer comment: The authors propose a new deep learning model for predicting protein-protein interactions but don't explain how they address the class imbalance in PPI datasets. Most protein pairs don't interact, creating an imbalance between positive and negative samples. It's unclear how the model balances sensitivity and specificity, which is important for systems biology applications." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.372, + 0.856, + 0.493 + ], + "angle": 0, + "content": "* Feedback to the reviewer: Does section 3.3 of the paper address your concern? Specifically, the following passage: To address the class imbalance in PPI datasets, where non-interacting pairs are far more common, we employ a \"Balanced Interaction Learning\" (BIL) approach. This involves using a focal loss function to reduce the influence of easy negatives, balanced minibatch sampling to ensure a mix of positive and negative samples, and a two-stage training process with pre-training on a balanced subset before fine-tuning on the full dataset ." 
+ }, + { + "type": "list", + "bbox": [ + 0.273, + 0.294, + 0.856, + 0.493 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.496, + 0.359, + 0.51 + ], + "angle": 0, + "content": "- Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.513, + 0.856, + 0.573 + ], + "angle": 0, + "content": "* Reviewer comment: Lack of theoretical analysis of the communication complexity of the proposed method. In distributed optimization, communication complexity is crucial for minimizing inter-node communication to enhance system efficiency and reduce communication costs." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.576, + 0.856, + 0.652 + ], + "angle": 0, + "content": "* Feedback to the reviewer: The paper appears to provide a theoretical analysis of communication complexity. Specifically, Theorem 3.6 states an \\( \\mathrm{O}(\\sqrt{\\kappa_{max}}\\log (1 / \\epsilon)) \\) communication complexity bound. Does this address your concern? Are there specific aspects of communication complexity analysis you feel are missing?" + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.513, + 0.856, + 0.652 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.217, + 0.654, + 0.857, + 0.699 + ], + "angle": 0, + "content": "(b) Look for any vague or unjustified claims in the review. This results in points that are not actionable or harder to respond to. For such cases, we would like to nudge the reviewer to provide more specific details and justify their claim." + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.7, + 0.856, + 0.714 + ], + "angle": 0, + "content": "First, let us define what it means for a comment to be actionable and specific enough." + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.715, + 0.699, + 0.729 + ], + "angle": 0, + "content": "There are a few pieces of criteria we will use to determine this:" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.732, + 0.856, + 0.761 + ], + "angle": 0, + "content": "i. The review comment specifies the section, paragraph, figure, or table where the issue occurs." + }, + { + "type": "text", + "bbox": [ + 0.251, + 0.765, + 0.856, + 0.795 + ], + "angle": 0, + "content": "ii. The issue or concern in the review comment is explicitly stated, avoiding vague language." + }, + { + "type": "text", + "bbox": [ + 0.247, + 0.798, + 0.856, + 0.828 + ], + "angle": 0, + "content": "iii. The comment explains why the identified issue is problematic and needs addressing." + }, + { + "type": "list", + "bbox": [ + 0.247, + 0.732, + 0.856, + 0.828 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.247, + 0.83, + 0.574, + 0.845 + ], + "angle": 0, + "content": "iv. The reviewer provides concrete examples:" + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.848, + 0.742, + 0.863 + ], + "angle": 0, + "content": "A. At least one example of what they find unclear or problematic." + }, + { + "type": "text", + "bbox": [ + 0.265, + 0.865, + 0.856, + 0.896 + ], + "angle": 0, + "content": "B. At least one example or suggestion of what would address their concern (e.g., specific metrics, experiments, or changes)." + }, + { + "type": "list", + "bbox": [ + 0.265, + 0.848, + 0.856, + 0.896 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "18" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.245, + 0.1, + 0.859, + 0.145 + ], + "angle": 0, + "content": "Do NOT nitpick. 
Most comments are already specific and actionable, and we do not want to provide feedback on those. We do NOT want to annoy reviewers with unnecessary feedback!" + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.146, + 0.857, + 0.176 + ], + "angle": 0, + "content": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:" + }, + { + "type": "title", + "bbox": [ + 0.255, + 0.179, + 0.359, + 0.193 + ], + "angle": 0, + "content": "- Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.196, + 0.856, + 0.225 + ], + "angle": 0, + "content": "* Reviewer comment: It appears that the linear mode connectivity results may be somewhat brittle." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.229, + 0.857, + 0.289 + ], + "angle": 0, + "content": "* Feedback to the reviewer: Can you elaborate on why you see the results as brittle? It may also be helpful to describe in further detail how the authors can address your concern. For example, if you believe additional experiments or theoretical analyses are needed, it may be helpful to explicitly say so." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.196, + 0.857, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.256, + 0.292, + 0.359, + 0.307 + ], + "angle": 0, + "content": "- Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.31, + 0.856, + 0.339 + ], + "angle": 0, + "content": "* Reviewer comment: The paper writing is not fluent enough and needs polishing to be easier to follow." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.342, + 0.857, + 0.386 + ], + "angle": 0, + "content": "* Feedback to the reviewer: It would be helpful if you could provide specific examples of sections or sentences that are difficult to follow. This would give the authors more actionable feedback." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.31, + 0.857, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.256, + 0.39, + 0.359, + 0.405 + ], + "angle": 0, + "content": "- Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.408, + 0.856, + 0.452 + ], + "angle": 0, + "content": "* Reviewer comment: In the proposed method, an additional optimization problem is required to solve every iteration, i.e., Eq. (11). Thus the proposed method seems inefficient since it is a nested-loop algorithm." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.455, + 0.857, + 0.545 + ], + "angle": 0, + "content": "* Feedback to the reviewer: Your concern about efficiency is valid, but it may be helpful to describe in further detail how the authors might address your concern. For example, you could ask about the computational complexity of solving Eq. (11) compared to the overall algorithm, or request empirical runtime comparisons to existing methods. This could help the authors address the efficiency concern more concretely." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.408, + 0.857, + 0.545 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.256, + 0.549, + 0.359, + 0.563 + ], + "angle": 0, + "content": "- Example 4:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.566, + 0.856, + 0.626 + ], + "angle": 0, + "content": "* Reviewer comment: The paper presents a limited number of baseline methods, and they are relatively outdated (between 2019 and 2021). 
Additionally, the paper lacks analytical experiments to substantiate that the proposed method has learned superior textual structural information." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.629, + 0.857, + 0.703 + ], + "angle": 0, + "content": "* Feedback to the reviewer: To strengthen this critique, consider suggesting specific, more recent baselines that you believe should be included. Also, providing examples of analytical experiments that could effectively demonstrate superior learning of textual structural information would make this feedback more actionable for the authors." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.566, + 0.857, + 0.703 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.256, + 0.707, + 0.359, + 0.722 + ], + "angle": 0, + "content": "- Example 5:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.724, + 0.856, + 0.814 + ], + "angle": 0, + "content": "* Reviewer comment: One of the assumptions of this paper is that \"most GNNs perform better on homophilic graphs\". I personally do not agree with it. A part of the heterophilic graphs are easy to fit, e.g., Wisconsin with \\(90 + \\%\\) accuracy, and some homophilic graphs are challenging. The difficulties of node classification on different datasets are not only related to the graph (label) homophily, but also related to the node features, and many other factors." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.817, + 0.856, + 0.878 + ], + "angle": 0, + "content": "* Feedback to the reviewer: Your point is helpful, but it would be more actionable to ask the authors to provide evidence supporting their assumption, rather than simply disagreeing. Consider asking for specific examples or citations that demonstrate GNNs performing better on homophilic graphs." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.724, + 0.856, + 0.878 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.256, + 0.881, + 0.359, + 0.896 + ], + "angle": 0, + "content": "- Example 6:" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "19" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.273, + 0.1, + 0.762, + 0.114 + ], + "angle": 0, + "content": "* Reviewer comment: The numbers in table 1 are not described." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.118, + 0.857, + 0.179 + ], + "angle": 0, + "content": "* Feedback to the reviewer: It would be helpful to specify what aspects of the numbers in Table 1 need more description. Are you referring to the meaning of the values, their units, or something else? This would help the authors provide a more targeted response." 
+ }, + { + "type": "list", + "bbox": [ + 0.273, + 0.1, + 0.857, + 0.179 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.181, + 0.856, + 0.211 + ], + "angle": 0, + "content": "The following are examples where the reviewer's comments are already specific and, most importantly, actionable, so you should not give any feedback:" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.214, + 0.857, + 0.305 + ], + "angle": 0, + "content": "- Reviewer comment: The paper claims occupancy is increased on Page 6 but it was unclear: (i) what definition of occupancy is being used (GPU resources could mean many things and occupancy often just refers to number of warps that can concurrently run versus max number supported by hardware); and (ii) whether any measurement has been made to confirm the claimed improvement (e.g., using NVIDIA Parallel Nsight or similar approaches for collecting performance counters)." + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.307, + 0.857, + 0.383 + ], + "angle": 0, + "content": "- Reviewer comment: Second paragraph under \"Semantic similarity\": I felt lots of details were missing here to better understand the quality of phrases, and the feasibility of the proposed approach. The Appendix A do not provide all necessary details. Is this done on the pretraining corpus? What trivial constituents were dropped out and why (some examples would help)?" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.385, + 0.857, + 0.431 + ], + "angle": 0, + "content": "- Reviewer comment: Some works like Saycan and RT2 also consider the match of the environment and the agent ability. Key differences between the proposed method and those existing works need to be more carefully discussed." + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.433, + 0.857, + 0.478 + ], + "angle": 0, + "content": "- Reviewer comment: The problem studied, and the techniques used, are closely related to Lipshitz bandits [2], pricing [3] and bilateral trade [1]. Please consider a more thorough comparison with the already known results and techniques there." + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.481, + 0.857, + 0.557 + ], + "angle": 0, + "content": "- Reviewer comment: In Table 3, FlashFFTConv outperforms torch.fft by up to \\(8.7\\mathrm{x}\\), while the speedup is about \\(2\\mathrm{x}\\) without the domain-specific optimizations. Does it mean the major speedup comes from the domain-specific optimizations instead of the FlashFFTConv algorithm? Could the authors conduct this ablation study (with and without the domain-specific optimizations) in other experiments?" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.558, + 0.857, + 0.634 + ], + "angle": 0, + "content": "- Reviewer comment: Then in Section 4.2, the authors propose to give the actor past actions to help it infer the state at the current step. I don't understand why is this not done by default. In my understanding, DOMDPs are POMDPs and in POMDPs, past actions and observations should always be given to the policy for optimal control. I don't see how this is an innovation." + }, + { + "type": "list", + "bbox": [ + 0.255, + 0.214, + 0.857, + 0.634 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.637, + 0.857, + 0.711 + ], + "angle": 0, + "content": "If a reviewer asks a question that is already clear, you do not need to give feedback on it or rephrase it. 
Questions need to be clear and specific, but they do not necessarily need to be actionable as they represent a reviewer's confusion. To be precise, in most cases if a comment ends in '?' you should ONLY give feedback if the question itself is unclear." + }, + { + "type": "text", + "bbox": [ + 0.245, + 0.712, + 0.857, + 0.741 + ], + "angle": 0, + "content": "Here are some examples of reviewer comments that are clear and specific, and therefore do not need feedback:" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.745, + 0.857, + 0.805 + ], + "angle": 0, + "content": "- Reviewer comment: 4) In Figure 6, Spearman rank correlation scores for HCMs are reported. As far as I know, Spearman rank correlation calculates the correlation between two variables. How was the correlation computed from multiple runs in this case?" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.808, + 0.857, + 0.853 + ], + "angle": 0, + "content": "- Reviewer comment: While there are detailed information about training procedure, not much is written about the actual inference step. For instance, how many samples for each prototype are required for reliable performance?" + }, + { + "type": "list", + "bbox": [ + 0.255, + 0.745, + 0.857, + 0.853 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.218, + 0.855, + 0.857, + 0.885 + ], + "angle": 0, + "content": "(c) If the reviewer claims the paper lacks novelty, ensure they specify why, including references to similar work. If they haven't, we would like to nudge the reviewer to" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "20" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.243, + 0.1, + 0.857, + 0.129 + ], + "angle": 0, + "content": " justify the claim, by prompting them to provide the most relevant references, the relationships, and specifying similarities or differences." + }, + { + "type": "text", + "bbox": [ + 0.243, + 0.13, + 0.857, + 0.16 + ], + "angle": 0, + "content": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:" + }, + { + "type": "title", + "bbox": [ + 0.255, + 0.164, + 0.359, + 0.178 + ], + "angle": 0, + "content": "- Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.181, + 0.856, + 0.21 + ], + "angle": 0, + "content": "* Reviewer comment: The paper's novelty is limited considering the ICLR standards." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.214, + 0.857, + 0.289 + ], + "angle": 0, + "content": "* Feedback to the reviewer: It would be really helpful to the authors if you consider discussing the reasons for why the novelty is limited, and specify what ICLR standards are in this context. In particular, it would be very helpful if you give examples of the closest papers, their similarities, and differences with the methods or results in the current paper." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.181, + 0.857, + 0.289 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.255, + 0.292, + 0.359, + 0.306 + ], + "angle": 0, + "content": "- Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.309, + 0.856, + 0.339 + ], + "angle": 0, + "content": "* Reviewer comment: The novelty of this work is not clear from the conclusion and experiments now." 
+ }, + { + "type": "text", + "bbox": [ + 0.273, + 0.342, + 0.857, + 0.417 + ], + "angle": 0, + "content": "* Feedback to the reviewer: To make this feedback more actionable, it would be helpful to specify which aspects of novelty are unclear or missing. Are there particular claims or contributions that need more justification? Providing concrete suggestions for how the authors could better highlight the novelty would give them clearer guidance." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.309, + 0.857, + 0.417 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.255, + 0.42, + 0.359, + 0.435 + ], + "angle": 0, + "content": "- Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.438, + 0.856, + 0.467 + ], + "angle": 0, + "content": "* Reviewer comment: The proposed method is not innovative enough. I'm not an expert in this field, so I'm not sure about it." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.47, + 0.857, + 0.576 + ], + "angle": 0, + "content": "* Feedback to the reviewer: It would be helpful if you could elaborate on why you think the method may not be innovative enough, even if you're not an expert. Are there specific aspects that seem similar to existing work? If you're uncertain about the novelty, it's best to phrase this as a question or area for clarification rather than a definitive weakness. For example, you could ask the authors to further explain how their approach differs from or improves upon existing methods for training vision-language models for satellite imagery." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.438, + 0.857, + 0.576 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.244, + 0.578, + 0.856, + 0.608 + ], + "angle": 0, + "content": "The following are examples where the reviewer's discussion of novelty is already detailed and actionable as written, so you should not give any feedback:" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.611, + 0.857, + 0.733 + ], + "angle": 0, + "content": "- Reviewer comment: DASHA is a mash-up between MARINA and existing distributed nonconvex optimization methods. Other than the fact that three variants of DASHA get rid of the uncompressed synchronization in MARINA, this reviewer could not pinpoint a difference between MARINA and DASHA. As such, the main novelty of this work seems to be in terms of theoretical analysis of MARINA when the uncompressed synchronization step is removed. The authors could have done a better job of clarifying where does this novelty lie in the analysis (e.g., pinpointing the key analytical approaches in the lemma that helped improve the analysis)" + }, + { + "type": "text", + "bbox": [ + 0.255, + 0.735, + 0.857, + 0.809 + ], + "angle": 0, + "content": "- Reviewer comment: I'm not sure the paper has sufficient novelty to be published in the top-tier conference since the proposed method only goes one step further from Task Arithmetic [1] and TIES-MERGING [2] by incorporating trainable weights for task vectors. The concept seems thin to support an entire paper, with only one page (page 6) dedicated to the novel part." + }, + { + "type": "list", + "bbox": [ + 0.255, + 0.611, + 0.857, + 0.809 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.812, + 0.857, + 0.888 + ], + "angle": 0, + "content": "(d) Identify any personal attacks or inappropriate remarks made by the reviewer. This can be about the personality, the knowledge, or the experience of the authors. 
For example, they call the work \"incompetent\" without justifying why. For this case, we would like to kindly warn the reviewer about their comment and politely suggest they revise their language." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.508, + 0.948 + ], + "angle": 0, + "content": "21" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.245, + 0.1, + 0.857, + 0.131 + ], + "angle": 0, + "content": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:" + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.134, + 0.359, + 0.148 + ], + "angle": 0, + "content": "- Example 1:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.15, + 0.857, + 0.181 + ], + "angle": 0, + "content": "* Reviewer comment: The authors clearly do not live in the real world and do not care about people or downstream effects of their research." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.183, + 0.857, + 0.213 + ], + "angle": 0, + "content": "* Feedback to the reviewer: We kindly suggest you revise this comment, as it includes remarks about the personalities or intents of the authors." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.15, + 0.857, + 0.213 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.216, + 0.359, + 0.231 + ], + "angle": 0, + "content": "- Example 2:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.234, + 0.857, + 0.262 + ], + "angle": 0, + "content": "* Reviewer comment: This paper is embarrassing, and you are clearly not fit to be in research." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.266, + 0.857, + 0.311 + ], + "angle": 0, + "content": "* Feedback to the reviewer: We appreciate your review, but kindly request that you focus your comments on the specific content and methodology of the paper rather than making personal remarks about the authors." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.234, + 0.857, + 0.311 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.256, + 0.315, + 0.359, + 0.329 + ], + "angle": 0, + "content": "- Example 3:" + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.332, + 0.857, + 0.407 + ], + "angle": 0, + "content": "* Reviewer comment: This MC-IS method for estimating the score will NEVER work well in high dimensions due to variance and thus why works such as [1,2,3,4] which are clearly aware of this formulation (as they either state it in their appendices or use it for subsequent calculation) pursue an optimization alternative to estimating the drift." + }, + { + "type": "text", + "bbox": [ + 0.273, + 0.41, + 0.857, + 0.469 + ], + "angle": 0, + "content": "* Feedback to the reviewer: Consider revising this comment to avoid absolute statements like \"NEVER\". Instead, you could phrase it as a concern about scalability to high dimensions, and ask the authors to address this limitation or provide evidence that it can work in higher dimensions." + }, + { + "type": "list", + "bbox": [ + 0.273, + 0.332, + 0.857, + 0.469 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.48, + 0.31, + 0.495 + ], + "angle": 0, + "content": "3. 
Provide feedback:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.506, + 0.857, + 0.535 + ], + "angle": 0, + "content": "- For each comment that fails according to the checklist, write concise feedback in the following format:" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.541, + 0.57, + 0.554 + ], + "angle": 0, + "content": "- Comment: the verbatim comment of interest" + }, + { + "type": "text", + "bbox": [ + 0.227, + 0.559, + 0.482, + 0.572 + ], + "angle": 0, + "content": "- Feedback: your concise feedback" + }, + { + "type": "list", + "bbox": [ + 0.227, + 0.541, + 0.57, + 0.572 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.578, + 0.851, + 0.593 + ], + "angle": 0, + "content": "- If you do not identify any issues with a comment, do not include it in your feedback list." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.599, + 0.824, + 0.614 + ], + "angle": 0, + "content": "- If you find no issues in the review at all, respond with: 'Thanks for your hard work!'" + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.578, + 0.851, + 0.614 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.624, + 0.225, + 0.637 + ], + "angle": 0, + "content": "Remember:" + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.649, + 0.68, + 0.663 + ], + "angle": 0, + "content": "- Be concise, limiting your feedback for each comment to 1-2 sentences." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.674, + 0.783, + 0.689 + ], + "angle": 0, + "content": "- Do not summarize your feedback at the end or include a preamble at the beginning." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.699, + 0.857, + 0.729 + ], + "angle": 0, + "content": "- Do not repeat anything the reviewer already included in their review, and do not praise anything the reviewer wrote as we want to provide constructive feedback." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.739, + 0.857, + 0.769 + ], + "angle": 0, + "content": "- Your feedback will be sent to reviewers. Do not mention that you are using a checklist or guidelines." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.78, + 0.857, + 0.809 + ], + "angle": 0, + "content": "- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.82, + 0.857, + 0.851 + ], + "angle": 0, + "content": "- Do not provide feedback to any comments that mention a score or rating. You do not care about the reviewer's score or rating for this paper." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.86, + 0.622, + 0.875 + ], + "angle": 0, + "content": "- Do not provide feedback to any comments that discuss typos." + }, + { + "type": "list", + "bbox": [ + 0.163, + 0.649, + 0.857, + 0.875 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "22" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.141, + 0.094, + 0.283, + 0.108 + ], + "angle": 0, + "content": "Aggregator Prompt" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.121, + 0.475, + 0.136 + ], + "angle": 0, + "content": "Here is the paper: {paper} ." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.137, + 0.73, + 0.151 + ], + "angle": 0, + "content": "Here are the lists of feedback: {feedbacks} ." 
+ }, + { + "type": "text", + "bbox": [ + 0.14, + 0.152, + 0.547, + 0.167 + ], + "angle": 0, + "content": "Here is the peer review: {review} ." + }, + { + "type": "title", + "bbox": [ + 0.141, + 0.189, + 0.339, + 0.204 + ], + "angle": 0, + "content": "Aggregator System Prompt" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.216, + 0.859, + 0.261 + ], + "angle": 0, + "content": "You will be given multiple lists of feedback about a peer review of a machine learning paper submitted to a top-tier ML conference. The aim of the feedback is to guide a reviewer to make the review high-quality. Your task is to aggregate the lists of feedback into one list." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.276, + 0.857, + 0.306 + ], + "angle": 0, + "content": "Here are the guidelines that were followed to generate the feedback lists originally: {ACTOR_SYSTEM_CHART} " + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.307, + 0.388, + 0.321 + ], + "angle": 0, + "content": "Here are step-by-step instructions:" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.331, + 0.856, + 0.36 + ], + "angle": 0, + "content": "1. Read the multiple feedback lists provided for that review, the text of the review, and the paper about which the review was written." + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.372, + 0.856, + 0.4 + ], + "angle": 0, + "content": "2. For all feedback lists, aggregate them into one list with the best comment-feedback pairs from each list:" + }, + { + "type": "list", + "bbox": [ + 0.16, + 0.331, + 0.856, + 0.4 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.412, + 0.856, + 0.442 + ], + "angle": 0, + "content": "- For each comment-feedback pair in the multiple lists that are similar, determine which provides the best feedback and keep only that pair." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.447, + 0.857, + 0.492 + ], + "angle": 0, + "content": "- If there are unique comment-feedback pairs in the multiple lists, critically determine if it is an essential piece of feedback needed to improve the review. If it is unnecessary or redundant, remove the comment-feedback pair." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.497, + 0.856, + 0.528 + ], + "angle": 0, + "content": "- You should end up with one feedback list that has no repeated comments from the review and that is high quality." + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.533, + 0.856, + 0.548 + ], + "angle": 0, + "content": "- Return the feedback list in the format you received it in, where the pairs are formatted as:" + }, + { + "type": "list", + "bbox": [ + 0.2, + 0.412, + 0.857, + 0.548 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.228, + 0.552, + 0.613, + 0.568 + ], + "angle": 0, + "content": "- Comment: {{the verbatim comment of interest}}" + }, + { + "type": "text", + "bbox": [ + 0.23, + 0.57, + 0.523, + 0.587 + ], + "angle": 0, + "content": "- Feedback: {{your concise feedback}}" + }, + { + "type": "list", + "bbox": [ + 0.228, + 0.552, + 0.613, + 0.587 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.141, + 0.608, + 0.244, + 0.622 + ], + "angle": 0, + "content": "Critic Prompt" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.635, + 0.475, + 0.65 + ], + "angle": 0, + "content": "Here is the paper: {paper} ." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.651, + 0.578, + 0.666 + ], + "angle": 0, + "content": "Here is the feedback: {feedback} ." 
+ }, + { + "type": "text", + "bbox": [ + 0.14, + 0.667, + 0.547, + 0.681 + ], + "angle": 0, + "content": "Here is the peer review: {review} ." + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.696, + 0.223, + 0.709 + ], + "angle": 0, + "content": "Remember:" + }, + { + "type": "text", + "bbox": [ + 0.165, + 0.721, + 0.856, + 0.751 + ], + "angle": 0, + "content": "- You are a critic that will help reviewers improve their comments and reviews. Your valuable feedback will help improve their review." + }, + { + "type": "text", + "bbox": [ + 0.164, + 0.761, + 0.856, + 0.79 + ], + "angle": 0, + "content": "- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer." + }, + { + "type": "list", + "bbox": [ + 0.164, + 0.721, + 0.856, + 0.79 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.141, + 0.811, + 0.301, + 0.826 + ], + "angle": 0, + "content": "Critic System Prompt" + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.837, + 0.859, + 0.884 + ], + "angle": 0, + "content": "You are a critic that will help reviewers improve their reviews. You are given a list of feedback to the reviewer comments of a machine learning paper submitted to a top-tier ML conference on OpenReview. The aim of the feedback is to guide a reviewer to improve their comments and re" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "23" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.139, + 0.1, + 0.856, + 0.116 + ], + "angle": 0, + "content": "view as a whole. Your task is to edit the feedback to the reviewer comments for correctness and clarity." + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.13, + 0.857, + 0.16 + ], + "angle": 0, + "content": "Here, feedback means the feedback given to the reviewer comments to improve them, so the feedback will be given to the reviewer." + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.176, + 0.857, + 0.221 + ], + "angle": 0, + "content": "Here are the guidelines that were followed to generate the feedback to the reviewer comments originally: {ACTOR_SYSTEM_PROMPT} . You should keep in mind to adhere to the above guidelines." + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.237, + 0.388, + 0.252 + ], + "angle": 0, + "content": "Here are step-by-step instructions:" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.261, + 0.856, + 0.291 + ], + "angle": 0, + "content": "1. Read the feedback list provided for reviewer comments, the full text of the review itself, and the paper about which the review was written." + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.301, + 0.561, + 0.316 + ], + "angle": 0, + "content": "2. Evaluate every piece of feedback in the feedback list:" + }, + { + "type": "list", + "bbox": [ + 0.158, + 0.261, + 0.856, + 0.316 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.326, + 0.856, + 0.385 + ], + "angle": 0, + "content": "- For each feedback item, it is imperative that you evaluate the correctness of the feedback. If there is a quote in the feedback, ensure that the quote appears verbatim in the paper. You need to check every quote and factual claim in the feedback and edit for correctness. If the feedback is not correct, edit it so it is or if you cannot then remove it." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.391, + 0.856, + 0.42 + ], + "angle": 0, + "content": "- For each feedback item, evaluate if it is clear. 
You should make sure it would not confuse or frustrate the reviewer who reads it." + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.326, + 0.856, + 0.42 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.431, + 0.856, + 0.491 + ], + "angle": 0, + "content": "3. Remove comment-feedback pairs that are too nitpicky, unnecessary, or superficial. Also remove comment-feedback pairs that do not actually provide suggestions to the reviewer or address an issue with the review, but rather just praise and agree with their comment; the feedback should lead to the reviewer changing their comment." + }, + { + "type": "text", + "bbox": [ + 0.178, + 0.496, + 0.856, + 0.525 + ], + "angle": 0, + "content": "Here are some examples of comment-feedback pairs that should be entirely removed from the final feedback list:" + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.536, + 0.856, + 0.565 + ], + "angle": 0, + "content": "(a) Reviewer comment: The novelty remains concerned. It seems that the algorithm combines MLR + a bisimulation loss." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.569, + 0.856, + 0.644 + ], + "angle": 0, + "content": "Feedback to the reviewer: It would be helpful if you could elaborate on why you see the novelty as limited. In particular, it would be very helpful if you could discuss the key differences between simply combining MLR and bisimulation loss versus the approach proposed in this paper. This would give the authors a better chance to clarify the novelty of their work." + }, + { + "type": "text", + "bbox": [ + 0.186, + 0.649, + 0.856, + 0.724 + ], + "angle": 0, + "content": "(b) Reviewer comment: The paper lacks a proper related work section, which makes it challenging for readers to quickly grasp the background and understand the previous works. It is crucial to include a comprehensive discussion on related works, especially regarding the variance-reduced ZO hard-thresholding algorithm and the variance reduction aspect." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.727, + 0.856, + 0.787 + ], + "angle": 0, + "content": "Feedback to the reviewer: To make this comment more actionable, consider suggesting specific areas of related work that should be covered, such as key papers on ZO optimization, hard-thresholding methods, and variance reduction techniques in optimization. This will give the authors clearer guidance on what to include." + }, + { + "type": "text", + "bbox": [ + 0.187, + 0.792, + 0.856, + 0.897 + ], + "angle": 0, + "content": "(c) Reviewer comment: The paper is not very well-presented and is hard to follow. First of all, it is unclear in the hybrid setting considered, what are the relative relations of the guest parties? In the introduction, it appears that they share the same feature space but have different sample IDs, however, in 3.1 they appear to have different dimensions and unclear alignment. It is suggested that the paper properly define the problem setting. A figure on how data is partitioned by different parties would also help." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "24" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.217, + 0.1, + 0.857, + 0.146 + ], + "angle": 0, + "content": "Feedback to the reviewer: Your suggestion for a clearer definition of the problem setting and a visual representation of data partitioning is excellent. This would significantly improve the paper's clarity and readability." 
+ }, + { + "type": "text", + "bbox": [ + 0.187, + 0.15, + 0.857, + 0.211 + ], + "angle": 0, + "content": "(d) Reviewer comment: 3) the model performance of the proposed methods still appear to be a little inferior to the centralized setting, not exactly \"comparable\" as claimed. It is important to understand whether the proposed method is \"lossless\" or \"lossy\" and why. I think more detailed examinations and explanations are needed here." + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.214, + 0.857, + 0.275 + ], + "angle": 0, + "content": "Feedback to the reviewer: Your observation about the performance gap between the proposed method and the centralized setting is insightful. Requesting a more detailed analysis of whether the method is lossless or lossy, along with explanations for any performance differences, would significantly enhance the paper's contribution." + }, + { + "type": "text", + "bbox": [ + 0.188, + 0.279, + 0.857, + 0.339 + ], + "angle": 0, + "content": "(e) Reviewer comment: Q2: It appears that the introduced projection loss can be directly optimized with respect to the trigger \\( T \\). What's the rationale behind setting an upper bound and optimizing the projection loss through this bound? Does this approach offer computational benefits?" + }, + { + "type": "text", + "bbox": [ + 0.216, + 0.342, + 0.857, + 0.402 + ], + "angle": 0, + "content": "Feedback to the reviewer: This question effectively probes the authors' methodological choices. It's a clear and concise query that could lead to valuable insights about the paper's approach. The authors' response could provide important context about the trade-offs involved in their method." + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.412, + 0.449, + 0.427 + ], + "angle": 0, + "content": "4. Edit comments based on evaluations:" + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.437, + 0.856, + 0.468 + ], + "angle": 0, + "content": "- Do not add any new points unless the previous feedback obviously missed something important." + }, + { + "type": "text", + "bbox": [ + 0.199, + 0.473, + 0.777, + 0.489 + ], + "angle": 0, + "content": "- If you do not identify any issues with a comment-feedback pair, do not edit it." + }, + { + "type": "list", + "bbox": [ + 0.199, + 0.437, + 0.856, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.497, + 0.857, + 0.529 + ], + "angle": 0, + "content": "5. The feedback will be shared with the reviewers for them to improve their comments. Address the reviewer in the second person (e.g., \"you\") and do not refer to them as \"the reviewer.\"" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.538, + 0.831, + 0.554 + ], + "angle": 0, + "content": "6. 
Return the feedback list in the format you received it in, where the pairs are formatted as:" + }, + { + "type": "list", + "bbox": [ + 0.158, + 0.497, + 0.857, + 0.554 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.563, + 0.582, + 0.579 + ], + "angle": 0, + "content": "- Comment: {{the verbatim comment of interest}}" + }, + { + "type": "text", + "bbox": [ + 0.2, + 0.583, + 0.493, + 0.599 + ], + "angle": 0, + "content": "- Feedback: {{your concise feedback}}" + }, + { + "type": "list", + "bbox": [ + 0.2, + 0.563, + 0.582, + 0.599 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.609, + 0.238, + 0.623 + ], + "angle": 0, + "content": "Remember:" + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.634, + 0.736, + 0.649 + ], + "angle": 0, + "content": "- You are a critic that will help reviewers improve their comments and reviews." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.659, + 0.744, + 0.674 + ], + "angle": 0, + "content": "- Be concise, limiting your feedback for each reviewer comment to 1-2 sentences." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.684, + 0.783, + 0.7 + ], + "angle": 0, + "content": "- Do not summarize your feedback at the end or include a preamble at the beginning." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.71, + 0.679, + 0.724 + ], + "angle": 0, + "content": "- Do not repeat anything the reviewer already included in their review." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.735, + 0.61, + 0.75 + ], + "angle": 0, + "content": "- Do not mention that you are using a checklist or guidelines." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.759, + 0.857, + 0.789 + ], + "angle": 0, + "content": "- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer." + }, + { + "type": "list", + "bbox": [ + 0.163, + 0.634, + 0.857, + 0.789 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.809, + 0.275, + 0.824 + ], + "angle": 0, + "content": "Formatter Prompt" + }, + { + "type": "text", + "bbox": [ + 0.14, + 0.836, + 0.512, + 0.853 + ], + "angle": 0, + "content": "Here is the feedback for you to format: {feedback}" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "25" + } + ], + [ + { + "type": "title", + "bbox": [ + 0.14, + 0.094, + 0.333, + 0.108 + ], + "angle": 0, + "content": "Formatter System Prompt" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.12, + 0.859, + 0.181 + ], + "angle": 0, + "content": "You will be given a set of feedback given to various reviewer comments in a peer review of a machine learning paper. Your response, which will be the list of reviewer comments and feedback to them, will be shared with the reviewers who wrote the review, so that they can improve their reviews and the peer review cycle." + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.196, + 0.859, + 0.227 + ], + "angle": 0, + "content": "Your task is to format the feedback into a structured format. 
You should format the feedback as a list of comment-feedback pairs:" + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.239, + 0.464, + 0.255 + ], + "angle": 0, + "content": "- Reviewer comment: \\(\\{\\{\\mathrm{a~comment}\\} \\}\\)" + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.263, + 0.614, + 0.28 + ], + "angle": 0, + "content": "- Feedback to the reviewer: {{feedback to the comment}}" + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.289, + 0.51, + 0.306 + ], + "angle": 0, + "content": "- Reviewer comment: {{another comment}}" + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.314, + 0.613, + 0.33 + ], + "angle": 0, + "content": "- Feedback to the reviewer: {{feedback to the comment}}" + }, + { + "type": "list", + "bbox": [ + 0.163, + 0.239, + 0.614, + 0.33 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.18, + 0.339, + 0.199, + 0.348 + ], + "angle": 0, + "content": "中" + }, + { + "type": "text", + "bbox": [ + 0.137, + 0.362, + 0.859, + 0.392 + ], + "angle": 0, + "content": "Your goal is to only keep feedback to the reviewers that can help them improve their comments. You should only pay attention to lines that start with \"Comment\" or \"Feedback\"." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.404, + 0.859, + 0.435 + ], + "angle": 0, + "content": "- Only keep the comment-feedback pairs where the feedback can help improve the reviewer. If there is no suggestion for improvement, remove the entire comment-feedback pair." + }, + { + "type": "text", + "bbox": [ + 0.195, + 0.445, + 0.857, + 0.474 + ], + "angle": 0, + "content": "- Here is an example of a comment-feedback pair that should be removed from the final feedback list:" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.48, + 0.857, + 0.541 + ], + "angle": 0, + "content": "* Reviewer comment: Section 2.2. \"It independently formulates new approaches\" → Is it a hallucination or a feature? It looks like a hallucination to me. If this is important for achieving good performance, can you provide an ablation study based on whether to allow new approaches or not?" + }, + { + "type": "text", + "bbox": [ + 0.229, + 0.543, + 0.857, + 0.589 + ], + "angle": 0, + "content": "* Feedback to the reviewer: This is a thoughtful question about an important aspect of the methodology. Your suggestion for an ablation study is particularly valuable and could provide insights into the method's effectiveness." + }, + { + "type": "text", + "bbox": [ + 0.196, + 0.593, + 0.857, + 0.624 + ], + "angle": 0, + "content": "- If the feedback says \"No changes needed\" or something with a similar meaning, remove the entire comment-feedback pair." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.633, + 0.857, + 0.663 + ], + "angle": 0, + "content": "- Do not modify the content of the feedback at all, only format it into the bullet point format described above." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.673, + 0.857, + 0.72 + ], + "angle": 0, + "content": "- The response you send will be immediately shared with the reviewers. Thus, there should be NO OTHER TEXT in the output, for example no preamble or conclusion sentences. Only respond with the list of feedback & reviewer comment bullets, and no other text." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.729, + 0.857, + 0.76 + ], + "angle": 0, + "content": "- Since your response will immediately be sent to the reviewers, if there is no feedback, just say \"Thanks for your hard work!\"." 
+ }, + { + "type": "list", + "bbox": [ + 0.163, + 0.633, + 0.857, + 0.76 + ], + "angle": 0, + "content": null + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "26" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.138, + 0.092, + 0.6, + 0.108 + ], + "angle": 0, + "content": "We also provide the prompt used for the incorporation analysis:" + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.117, + 0.367, + 0.133 + ], + "angle": 0, + "content": "Incorporation Analysis Prompt" + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.144, + 0.86, + 0.176 + ], + "angle": 0, + "content": "Task: Determine if the following feedback suggestion was incorporated into the modified version of a review. Also, categorize the given feedback into exactly one of these three categories:" + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.185, + 0.857, + 0.245 + ], + "angle": 0, + "content": "1. ACTIONABLE_VAGUE: Encouraging reviewers to rephrase vague review comments, making them more actionable for the authors. For example, the feedback says: \"It would be helpful to suggest specific baselines that you think must be included. Are there particular methods you feel are missing from the current comparison? Could you elaborate why?\"" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.255, + 0.857, + 0.316 + ], + "angle": 0, + "content": "2. CONTENTClarIFY: Highlighting sections of the paper that may already address some of the reviewer's questions (clarifying content). For example, the feedback says: \"Does Figure 5 of the paper answer your question? In particular: 'In Transformers, the proposed technique provides \\(25\\%\\) relative improvement in wall-clock time (Figure 5)'.\"" + }, + { + "type": "text", + "bbox": [ + 0.158, + 0.326, + 0.857, + 0.386 + ], + "angle": 0, + "content": "3. ADDRESS_UNPROFESSIONAL: Identifying and addressing unprofessional or inappropriate remarks in the review. For example, the feedback says: \"We appreciate your review, but kindly request that you focus your comments on the specific content and methodology of the paper rather than making personal remarks about the authors.\"" + }, + { + "type": "list", + "bbox": [ + 0.158, + 0.185, + 0.857, + 0.386 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.14, + 0.397, + 0.248, + 0.411 + ], + "angle": 0, + "content": "Instructions:" + }, + { + "type": "text", + "bbox": [ + 0.16, + 0.422, + 0.512, + 0.437 + ], + "angle": 0, + "content": "1. Read the original review and modified review." + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.447, + 0.737, + 0.461 + ], + "angle": 0, + "content": "2. Read the reviewer's original comment and the feedback given to the reviewer." + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.472, + 0.857, + 0.546 + ], + "angle": 0, + "content": "3. Determine if the changes suggested in the feedback were incorporated into the modified review as compared to the original review. If the reviewer's original comment appears verbatim in the modified review still, you should return FALSE for the incorporation. The incorporations should be clear and quite explicit. Think critically about if the incorporation is significant enough to count." + }, + { + "type": "text", + "bbox": [ + 0.159, + 0.558, + 0.839, + 0.573 + ], + "angle": 0, + "content": "4. Determine which of the three categories best describes the primary purpose of the feedback." 
+ }, + { + "type": "text", + "bbox": [ + 0.159, + 0.583, + 0.52, + 0.598 + ], + "angle": 0, + "content": "5. Think step by step and explain your reasoning." + }, + { + "type": "list", + "bbox": [ + 0.159, + 0.422, + 0.857, + 0.598 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.138, + 0.608, + 0.857, + 0.638 + ], + "angle": 0, + "content": "Output Format: Please provide your final answer as two comma-separated values between tags, where:" + }, + { + "type": "text", + "bbox": [ + 0.162, + 0.648, + 0.819, + 0.664 + ], + "angle": 0, + "content": "- The first boolean is TRUE or FALSE depending on whether the feedback was incorporated." + }, + { + "type": "text", + "bbox": [ + 0.163, + 0.674, + 0.857, + 0.703 + ], + "angle": 0, + "content": "- The second string is one of these three options: ACTIONABLE_VAGUE, CONTENT_CLRIFY, or ADDRESS_UNPROFESSIONAL." + }, + { + "type": "list", + "bbox": [ + 0.162, + 0.648, + 0.857, + 0.703 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.139, + 0.713, + 0.559, + 0.729 + ], + "angle": 0, + "content": "Example: TRUE, ACTIONABLE_VAGUE" + }, + { + "type": "title", + "bbox": [ + 0.114, + 0.761, + 0.347, + 0.781 + ], + "angle": 0, + "content": "B Reliability tests" + }, + { + "type": "text", + "bbox": [ + 0.112, + 0.792, + 0.884, + 0.823 + ], + "angle": 0, + "content": "We generated the following reliability tests to be run in real-time after feedback was generated. For each reliability test, we provide examples of feedback that would fail it:" + }, + { + "type": "text", + "bbox": [ + 0.133, + 0.829, + 0.88, + 0.858 + ], + "angle": 0, + "content": "1. Praising the reviewer: make sure the feedback does not simply praise what the reviewer wrote without providing critical suggestions to improve their comment." + }, + { + "type": "text", + "bbox": [ + 0.153, + 0.859, + 0.817, + 0.875 + ], + "angle": 0, + "content": "Example feedback: \"This is a good question that challenges a key assumption of the paper.\"" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.882, + 0.884, + 0.913 + ], + "angle": 0, + "content": "2. Addressing feedback to the author: certify that the feedback is addressed to the reviewer with suggestions to make their review better, rather than addressed to the author of the paper with suggestions" + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "27" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.152, + 0.092, + 0.428, + 0.106 + ], + "angle": 0, + "content": "on how they can improve their paper." + }, + { + "type": "text", + "bbox": [ + 0.152, + 0.107, + 0.884, + 0.168 + ], + "angle": 0, + "content": "Example feedback: \"To strengthen your paper, consider discussing the relationship between FrugalGPT and traditional ensembling techniques. Highlight both similarities and differences and explain how this relates to the observed quality improvements. This would provide more context for your results and situate your work within the broader field of machine learning.\"" + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.177, + 0.884, + 0.207 + ], + "angle": 0, + "content": "3. Restate what the reviewer wrote: does the feedback simply restate what the review comment says without providing any new meaningful and unique suggestions?" 
+ }, + { + "type": "text", + "bbox": [ + 0.151, + 0.208, + 0.882, + 0.237 + ], + "angle": 0, + "content": "Example reviewer comment: Can examples or further clarification be given for the 3.1 sentence \"enhancing the accountability of the output\"? This isn't clear, at least to me." + }, + { + "type": "text", + "bbox": [ + 0.151, + 0.238, + 0.884, + 0.283 + ], + "angle": 0, + "content": "Example feedback: This is a good point that could lead to improved clarity in the paper. To make your comment more actionable, you could ask the authors to provide examples or further clarification for the sentence \"enhancing the accountability of the output\"." + }, + { + "type": "text", + "bbox": [ + 0.132, + 0.293, + 0.884, + 0.323 + ], + "angle": 0, + "content": "4. Format is correct: ensure that all feedback pairs are in the correct format, protecting against any errors in the pipeline that could have led to malformed feedback." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.346, + 0.836, + 0.366 + ], + "angle": 0, + "content": "C Average score changes during review and rebuttal periods" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.377, + 0.884, + 0.496 + ], + "angle": 0, + "content": "In Figure S1A, we examined the potential change in review scores (soundness, presentation, contribution, rating, and confidence) between the initial and modified reviews across the groups during the review period. We found that reviewers who were selected to receive feedback did not change their scores more than those in the control group (top panel). We also saw that of reviewers who received feedback, reviewers who updated their review were significantly more likely to decrease their soundness score and increase their confidence score at the end of the review period (before the rebuttal period began) compared to those who did not update their review. This suggests that reviewers who updated their reviews became more confident in their assessments." + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.498, + 0.884, + 0.575 + ], + "angle": 0, + "content": "In Figure S1B, we conducted the same analysis during the rebuttal period. Similar to the review period, we found that reviewers who were selected to receive feedback did not change their scores more than those in the control group (top panel). Of reviewers who received feedback, those who updated their reviews significantly increased all scores except confidence compared to those who did not update their reviews. From this, we see that reviewers who updated their reviews were much more engaged in the rebuttal process." + }, + { + "type": "title", + "bbox": [ + 0.112, + 0.596, + 0.524, + 0.617 + ], + "angle": 0, + "content": "D Incorporation model validation" + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.627, + 0.885, + 0.795 + ], + "angle": 0, + "content": "To test our incorporation model, we hand-labeled a test set of 222 feedback items (from 63 randomly chosen reviews that had been updated) as being incorporated into the updated review or not. We labeled 132 of those items as incorporated (59.5%) and 90 as not (40.5%). We then ran those 222 feedback items through the LLM pipeline and received a 92% accuracy rate, with a false negative rate of 0.9% and a false positive rate of 5.9% (see Supplementary Figure S2). Of the false positives, 8/13 were instances of human error where the labeler missed that the item was incorporated into the review, and the model accurately identified this incorporation. 
The remaining 5 false positives were due to subjectivity - the model reasoned that the reviewer partially incorporated the sentiments of the feedback, whereas the labeler did not view that as sufficient enough to count as incorporated. The two false negatives represent data points the labeler initially mislabeled and the model correctly labeled. This effectively gives us a false negative rate of 0% and a false positive rate of 2.25%, allowing us to be confident that our incorporation pipeline was highly accurate." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "28" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.117, + 0.212, + 0.133, + 0.223 + ], + "angle": 0, + "content": "A" + }, + { + "type": "image", + "bbox": [ + 0.128, + 0.224, + 0.486, + 0.614 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.511, + 0.213, + 0.523, + 0.223 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image", + "bbox": [ + 0.524, + 0.224, + 0.877, + 0.614 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.111, + 0.653, + 0.884, + 0.79 + ], + "angle": 0, + "content": "Supplementary Figure S1: (A) Review period score changes. (Top) There is no significant difference in updating scores (measured between initial and pre-rebuttal reviews) between the feedback and control groups. (Bottom) Among reviewers who received feedback, those who updated their reviews were more likely to decrease soundness scores \\((\\mathrm{p} \\leq 0.05)\\) and increase confidence scores \\((\\mathrm{p} \\leq 0.05)\\) compared to those who did not update their reviews. (B) Rebuttal period score changes. (Top) There is no significant difference in updating scores (measured between pre- and post-rebuttal reviews) between the feedback and control groups. (Bottom) Among feedback recipients, reviewers who updated their reviews demonstrated significantly larger score increases across all metrics (soundness: \\(^{**}\\mathrm{p} \\leq 0.01\\); presentation: \\(^{***}\\mathrm{p} \\leq 0.001\\); contribution: \\(^{\\ast}\\mathrm{p} \\leq 0.05\\); rating: \\(^{***}\\mathrm{p} \\leq 0.001\\)) except confidence, compared to non-updaters." 
+ }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "29" + } + ], + [ + { + "type": "image_caption", + "bbox": [ + 0.12, + 0.277, + 0.135, + 0.287 + ], + "angle": 0, + "content": "A" + }, + { + "type": "image", + "bbox": [ + 0.206, + 0.281, + 0.232, + 0.304 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.234, + 0.284, + 0.354, + 0.301 + ], + "angle": 0, + "content": "Original review" + }, + { + "type": "image", + "bbox": [ + 0.206, + 0.324, + 0.231, + 0.347 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.232, + 0.326, + 0.361, + 0.342 + ], + "angle": 0, + "content": "Modified review" + }, + { + "type": "image", + "bbox": [ + 0.206, + 0.366, + 0.231, + 0.389 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.233, + 0.369, + 0.354, + 0.384 + ], + "angle": 0, + "content": "Feedback item" + }, + { + "type": "image", + "bbox": [ + 0.422, + 0.318, + 0.59, + 0.353 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.606, + 0.317, + 0.796, + 0.353 + ], + "angle": 0, + "content": "Feedback incorporated (Y/N)" + }, + { + "type": "image", + "bbox": [ + 0.345, + 0.414, + 0.645, + 0.602 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.12, + 0.415, + 0.134, + 0.426 + ], + "angle": 0, + "content": "B" + }, + { + "type": "image_caption", + "bbox": [ + 0.112, + 0.637, + 0.884, + 0.728 + ], + "angle": 0, + "content": "Supplementary Figure S2: (A) Incorporation model pipeline. Given the original review text, modified review text, and individual feedback item, the LLM determined if the feedback was incorporated into the modified review or not. (B) Model accuracy. Our incorporation model successfully labeled \\(92\\%\\) of the test feedback items, where human annotators determined the ground truth labeling. Of the false positives, the majority were instances of human error where the model accurately identified the missed incorporation. All of the false negatives were instances of human error that the model caught." + }, + { + "type": "page_number", + "bbox": [ + 0.49, + 0.936, + 0.509, + 0.948 + ], + "angle": 0, + "content": "30" + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_origin.pdf b/data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..9112456a2018c55a3301fc572c318a4698e06d4f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/7d7c8fb3-f67d-4aab-8ccf-59890ce81391_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:60f112f1ee057d302df6069e7eacc6d5864d53dc001307100c9a7e190a9f90f1 +size 7294617 diff --git a/data/2025/2504_09xxx/2504.09737/full.md b/data/2025/2504_09xxx/2504.09737/full.md new file mode 100644 index 0000000000000000000000000000000000000000..9a7216b0c23105ccf6695e87ebfa73805d81dd8c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/full.md @@ -0,0 +1,697 @@ +# Can LLM feedback enhance review quality? 
A randomized study of 20K reviews at ICLR 2025 + +Nitya Thakkar1, Mert Yuksekgonul1, Jake Silberg1, Animesh Garg2, Nanyun Peng3, Fei Sha4, Rose Yu5, Carl Vondrick6, James Zou1 + +$^{1}$ Stanford University + +2Georgia Institute of Technology + +3University of California, Los Angeles + +4Google Research + +5University of California, San Diego + +$^{6}$ Columbia University + +# Abstract + +Peer review at AI conferences is stressed by rapidly rising submission volumes, leading to deteriorating review quality and increased author dissatisfaction. To address these issues, we developed Review Feedback Agent, a system leveraging multiple large language models (LLMs) to improve review clarity and actionability by providing automated feedback on vague comments, content misunderstandings, and unprofessional remarks to reviewers. Implemented at ICLR 2025 as a large randomized control study, our system provided optional feedback to more than 20,000 randomly selected reviews. To ensure high-quality feedback for reviewers at this scale, we also developed a suite of automated reliability tests powered by LLMs that acted as guardrails to ensure feedback quality, with feedback only being sent to reviewers if it passed all the tests. The results show that $27\%$ of reviewers who received feedback updated their reviews, and over 12,000 feedback suggestions from the agent were incorporated by those reviewers. This suggests that many reviewers found the AI-generated feedback sufficiently helpful to merit updating their reviews. Incorporating AI feedback led to significantly longer reviews (an average increase of 80 words among those who updated after receiving feedback) and more informative reviews, as evaluated by blinded researchers. Moreover, reviewers who were selected to receive AI feedback were also more engaged during paper rebuttals, as seen in longer author-reviewer discussions. This work demonstrates that carefully designed LLM-generated review feedback can enhance peer review quality by making reviews more specific and actionable while increasing engagement between reviewers and authors. The Review Feedback Agent is publicly available at https://github.com/zou-group/review_feedback_agent. + +# 1 Introduction + +Scientific peer review is a critical step before publication, where domain experts evaluate the research to ensure thoroughness and scientific integrity, prevent false claims, and provide a strong foundation for future work [1, 2]. High-quality reviews are essential for authors to improve their work, address key limitations, and advance scientific progress. However, in a survey of 11,800 researchers worldwide, while $98\%$ view peer review as essential to maintaining the quality and integrity of academic communication, only $55.4\%$ expressed satisfaction with the quality of reviews they receive [3]. This dissatisfaction has grown as obtaining constructive and high-quality peer reviews has become more challenging due to the increase in the number of paper submissions, especially in fast-moving areas like Artificial Intelligence (AI) [4, 5]. For example, the International Conference on Learning Representations (ICLR) experienced year-over-year submission increases of $47\%$ in 2024 and $61\%$ in 2025 [6]. To maintain a rigorous and meaningful peer review process amid this growth, it is crucial to address the growing burden on reviewers and the subsequent deterioration in review quality. 
+ +Authors at AI conferences increasingly report receiving short, vague reviews with criticisms like 'not novel' or 'not state-of-the-art (SOTA)' [7]. At the 2023 Association for Computational Linguistics meeting, + +authors flagged $12.9\%$ of reviews for poor quality, primarily due to these vague, surface-level criticisms [8]. The peer review system is further strained by reviewers being assigned papers outside their expertise [9] and the same papers being reviewed multiple times due to high rejection rates [1]. Additionally, the 2014 NeurIPS Experiment highlighted inconsistencies in the peer review process by showing that approximately $25\%$ of paper acceptance decisions differed between two independent review committees [10]. These issues not only frustrate authors but potentially allow weaker research to be accepted while strong work is rejected, ultimately preventing papers from reaching their full potential due to the decline of meaningful dialogue between reviewers and authors. + +Large language models (LLMs) [11] have the potential to enhance the quality and usefulness of peer reviews for authors [12]. Recent studies demonstrated that LLMs can serve as effective critics, generating detailed and constructive feedback [13, 14]. Furthermore, LLMs have already shown high utilization in the peer review process. Reviewers are increasingly turning to LLMs to assist in drafting their reviews, with an estimated $10.6\%$ of reviewers at ICLR 2024 using LLMs for this purpose [15, 16]. + +To explore how LLMs can improve review quality at scale, we introduce Review Feedback Agent, a multi-LLM system designed to enhance the clarity and actionability of reviews by providing feedback to reviewers. Piloted at ICLR 2025 as a large randomized control study, our agent provided feedback to over 20,000 randomly selected reviews (representing half of all ICLR 2025 reviews) over four weeks from October 15 to November 12, 2024. The generated feedback primarily focused on minimizing instances of vague and unjustified comments while also addressing content misinterpretations and unprofessional remarks. Using Claude Sonnet 3.5 as the backbone [11], we created a system of five LLMs that collaborated to generate high-quality feedback. To enhance the system's reliability against potential errors or failures in instruction-following [17, 18], we developed a set of reliability tests to evaluate specific qualities of the generated feedback; the feedback was only posted if it passed all of these tests. + +Summary of main findings. Of the randomly selected ICLR reviews that received AI feedback, $26.6\%$ of reviewers updated their reviews, altogether incorporating 12,222 suggestions from the feedback agent into the reviews. Blinded ML researchers labeled these revised reviews as more informative and clearer than their initial versions. Reviewers who updated after receiving feedback increased the length of reviews by an average of 80 words. Furthermore, AI feedback led to more engaged discussions during the rebuttal period, as seen through longer author and reviewer responses. We also observed that reviewers who received feedback were more likely to change their scores after the rebuttal period, which was consistent with a more engaged rebuttal process. + +In this study, we present the first large-scale deployment for using LLMs to assist peer review. By making reviews more actionable and informative, we aim to enhance the peer review experience and promote a more constructive scientific process. 
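As a rough illustration of the gating behavior described above, the sketch below shows the post-only-if-every-test-passes pattern in Python. The `generate_feedback`, `tests`, and `post` callables are hypothetical placeholders rather than the deployed system's API; the actual reliability tests are listed in Appendix B.

```python
# Minimal sketch (not the deployed implementation) of the guardrail pattern:
# candidate feedback is posted only if it passes every reliability test.
from typing import Callable, List, Optional

def post_if_reliable(paper: str, review: str,
                     generate_feedback: Callable[[str, str], str],
                     tests: List[Callable[[str], bool]],
                     post: Callable[[str], None]) -> Optional[str]:
    """Generate feedback for a review and post it only when all tests pass."""
    feedback = generate_feedback(paper, review)
    if all(test(feedback) for test in tests):  # every guardrail must accept
        post(feedback)
        return feedback
    return None  # failing feedback is withheld rather than shown to the reviewer
```

Because the generation call, the tests, and the posting step are injected as callables, the same skeleton applies regardless of the underlying model or review platform.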
+ +# 2 Methods + +In what follows, we first describe the review feedback experiment, including its goals and our technical setup with OpenReview. Next, we outline the architecture of our Review Feedback Agent and explain how the system was designed to meet our goals while ensuring a high level of reliability. In total, the agent automatically provided feedback to over 20,000 reviews at ICLR 2025. + +# 2.1 ICLR 2025 review feedback experiment + +Our pilot study was conducted in collaboration with ICLR 2025 and OpenReview. As one of the world's fastest-growing AI conferences, ICLR receives thousands of paper submissions yearly; in 2025, ICLR received 11,603 submissions. Each submission is assigned an average of 4 reviewers, and all reviews are standardized to include the same sections: summary, strengths, weaknesses, and questions. Furthermore, reviewers provide scores on a scale of 1 (low) to 10 (high), rating the paper according to the following categories: soundness, presentation, contribution, rating, and confidence. + +Goal: Our goal was to enhance review quality and, in particular, reduce low-information content reviews. Toward this goal, we identified three categories of common issues in reviews that we hoped to improve by providing LLM-generated feedback. The common issues are: 1) vague or generic critiques in reviews (the + +![](images/cb3cb6790063b0f0ae0f127965da667e45b0ac6b2a5535a4245588d35aa6d9d9.jpg) +A + +![](images/53730c84253867313b52b006f4df0105e6064c8df741a4e4dceee4f0f040a1ce.jpg) +B + +![](images/92b4aa5a4616a8525289426598d6d2f1ec4b3b5673fe1fdf950f45f5a7d0227e.jpg) +C +Figure 1: (A) Randomized controlled study setup. Before the start of the review period, we randomly assigned all submissions to one of three groups to determine how many of its reviews received feedback: none, half, or all. When a review selected to receive feedback was submitted, the agent generated and posted feedback after 1 hour. Reviewers could update their review, optionally, based on the feedback until the end of the review period, which ran from October 14 to November 12, 2024. (B) Feedback categories. Our system is designed to address three main types of review comments. Here, we provide examples of comments that would receive feedback from our agent, as well as examples of the generated feedback. (C) Review Feedback Agent. Our system consists of five LLMs (Actors, Aggregator, Critic, and Formmatter). Two parallel Actors generate the initial feedback, then pass it to the Aggregator, the Critic, and finally the Formmatter. Finally, the feedback is passed through the reliability tests; upon successfully passing, the feedback is posted on a review. We provide examples of comments and feedback given to those comments by our system. + +feedback asks the reviewers to be more specific and actionable); 2) questions or confusions that could be addressed by overlooked parts of the paper (the feedback highlights relevant sections); and 3) unprofessional statements in the review (the feedback asks the reviewer to rephrase). For each comment in a review, the Review Feedback Agent determined if it fell into any of these problematic categories and, if so, provided feedback on that specific review comment. + +Experimental setup: We set up this experiment as a Randomized Control Trial (RCT) to enable us to make causal inferences about how receiving feedback influences the peer review process. Before the beginning of the review period, we randomly split papers into one of three equal groups (see Figure 1A): + +1. 
No reviews for this paper will receive feedback, +2. Half of the reviews for this paper will be randomly selected to receive feedback, +3. All reviews for this paper will receive feedback. + +For reviews randomly assigned to receive feedback, the Review Feedback Agent, wrapped in an API, was automatically triggered when a reviewer first submitted their review on OpenReview. We delayed the feedback generation by one hour after a review was initially submitted to allow reviewers time to make any small edits (e.g., typo corrections). See Figure 1A for an example timeline. The agent posted feedback to reviews through the OpenReview interface by replying to reviews with the feedback wrapped in a comment. See Figure 2 for an example of what feedback looked like on the OpenReview website. + +![](images/fe3c200bfa72cd0d795f0d8bd31f374bae1fefefefb74b32eebe2adb5d84a559.jpg) +Figure 2: OpenReview interface. Here, we provide an example of feedback posted to a review on the OpenReview website (with consent from the reviewer). Feedback is only visible to the reviewer and the ICLR program chairs and was posted roughly one hour after the initial review was submitted. + +The agent only provided feedback on the initial review, and there was no subsequent interaction between the reviewer and the feedback system after that time point. The feedback is only visible to the reviewer + +and the ICLR program chairs; it was not shared with other reviewers, authors, or area chairs and was not a factor in the acceptance decisions. Reviewers were informed that the feedback was generated by a LLM and could choose to ignore the feedback or revise their review in response, as the system did not make any direct changes. Finally, we did not access or store any identifiable information about authors or reviewers. This study was reviewed by IRB and deemed low risk. + +Statistics: Around $50\%$ of reviews were randomly selected to receive feedback. Of the 44,831 reviews submitted on 11,553 unique papers (we excluded desk-rejected submissions), we posted feedback to 18,946 reviews $(42.3\%)$ over 4 weeks from October 15 to November 12, 2024 (see Figure 2A). Less than $8\%$ of the selected reviews did not receive feedback for one of two reasons: 2,692 reviews were originally well-written and did not need feedback, while 829 reviews had feedback that failed the reliability tests. Each review took roughly one minute to run through our entire pipeline and cost around 50 cents. On average, each review that received feedback was given 3-4 feedback comments, with a minimum of 1 and a maximum of 17. + +# 2.2 Review Feedback Agent + +The Review Feedback Agent aimed to provide feedback that helped reviewers make their comments more specific, constructive, and actionable for the authors. + +Feedback categories: The Review Feedback Agent provided suggestions on three potential categories of issues in reviews. We curated these categories by examining reviewer guidelines from several AI conferences [19, 20, 21, 22] and evaluating previously identified patterns of "lazy reviewer thinking" [7]. We also took inspiration from the ARR guidelines, where 16 common reviewer heuristics are outlined [23]. Importantly, the agent was not designed to suggest new ideas to add to the review; rather, it only focused on revising the existing ideas and preventing lower-quality reviews. The target feedback areas that we ultimately focused on were: + +1. 
Improving specificity: Encouraging reviewers to rephrase vague review comments, making them more specific, actionable, and justified for the authors. +2. Addressing misunderstandings: Highlighting sections of the paper that may already address some of the reviewer's questions or confusion. +3. Reducing unprofessional remarks: Identifying and addressing unprofessional or inappropriate remarks in the review. A 2019 study of 1,106 researchers found that $58\%$ had received an unprofessional review, highlighting its prevalence [24]. + +See Figure 1B for examples of real reviewer comments (from ICLR 2024 reviews and public journal reviews) in each category that would receive feedback and examples of feedback that would be given. + +Preprocessing: The agent was provided with the paper PDF's text (extracted using pypdf's PDFReader [25]) and the review text as input. We extracted the summary, strengths, weaknesses, and questions sections from the review. We did not provide the agent with any of the scores the reviewer initially gave the paper. + +Architecture: The agent generated a list of pairs, with each pair consisting of a review comment that fit into one of the problematic categories above and the corresponding feedback provided for that comment. The agent was composed of a pipeline of five LLMs (see Algorithm 1, Figure 1C). We used the Claude Sonnet 3.5 (June 20, 2024) model [11] as the backbone; we picked the backbone model by generating feedback with the same prompt using GPT-4o, Gemini 1.5 Flash, and Claude Sonnet 3.5 and then conducting a blind preference evaluation. Additionally, through testing, we found that one LLM was insufficient to generate high-quality feedback and format it correctly, thus, we instantiated the multi-call pipeline. First, we defined two parallel actor LLMs to generate the initial set of feedback based on the previously defined target areas. The actors were provided with the initial review $(R)$ and paper text $(P)$ as inputs. We used two separate actors to optimize for feedback diversity. Then, we passed the two lists of feedback, $F_{1}$ and $F_{2}$ , to an aggregator LLM, which merged the lists into one set of feedback, $F_{combined}$ . Next, we passed this candidate list to a critic LLM responsible for ensuring the feedback was accurate and clear. Importantly, the critic also removed any feedback that was too superficial or nitpicky, defined through various in-context examples (see Appendix A for the examples), as we did not want to overwhelm or annoy reviewers. Finally, aformatter LLM was provided with this final list, $F_{filtered}$ , and formatted it into pairs: + +- **Reviewer comment:** a comment +- **Feedback to the reviewer:** feedback to the comment + +Algorithm 1 Review Feedback Agent +1: Input: Paper text $P$ , Review $R$ , max attempts $T = 2$ +2: for $t = 1$ to $T$ do +3: $F_{1} \gets \mathrm{Actor}_{1}(P, R)$ +4: $F_{2} \gets \mathrm{Actor}_{2}(P, R)$ +5: $F_{combined} \gets \mathrm{Aggregator}(F_{1}, F_{2})$ +6: $F_{filtered} \gets \mathrm{Critic}(F_{combined})$ +7: $F_{final} \gets \mathrm{Formatter}(F_{filtered})$ +8: if PassReliabilityTests(Ffinal) then return $F_{final}$ +9: end if +10: end for +11: return error + +See Appendix A for the exact prompts used. To refine this system, we constructed a test set of 50 ICLR 2024 reviews we perceived to be of low quality in one or more of our target areas (i.e., they made vague comments, asked questions that were present in the paper already, and/or made unprofessional remarks). 
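For concreteness, the control flow of Algorithm 1 can be written as a short orchestration routine. The sketch below is a minimal illustration under stated assumptions, not the deployed implementation: `call_llm` stands in for a request to the backbone model, and the `*_PROMPT` constants abbreviate the actual prompts listed in Appendix A.

```python
# Minimal sketch of Algorithm 1; illustrative only, not the deployed code.
# `call_llm` is a placeholder for a request to the backbone model, and the
# *_PROMPT constants abbreviate the prompts reproduced in Appendix A.

ACTOR_SYSTEM_PROMPT = "..."   # actor instructions (Appendix A)
AGGREGATOR_PROMPT = "..."     # merge two candidate feedback lists into one
CRITIC_PROMPT = "..."         # drop inaccurate, superficial, or nitpicky items
FORMATTER_PROMPT = "..."      # format as (reviewer comment, feedback) pairs


def call_llm(system_prompt: str, user_prompt: str) -> str:
    """Placeholder: in deployment this wraps a call to the backbone LLM."""
    raise NotImplementedError


def passes_reliability_tests(feedback: str) -> bool:
    """Placeholder for the four reliability tests described in Section 2.2."""
    raise NotImplementedError


def review_feedback_agent(paper: str, review: str, max_attempts: int = 2) -> str:
    user = f"Here is the paper: {paper}. Here is the peer review: {review}."
    for _ in range(max_attempts):
        f1 = call_llm(ACTOR_SYSTEM_PROMPT, user)   # Actor 1
        f2 = call_llm(ACTOR_SYSTEM_PROMPT, user)   # Actor 2 (run in parallel in practice)
        combined = call_llm(AGGREGATOR_PROMPT, f1 + "\n\n" + f2)
        filtered = call_llm(CRITIC_PROMPT, combined)
        final = call_llm(FORMATTER_PROMPT, filtered)
        if passes_reliability_tests(final):        # guardrails: only post feedback that passes
            return final
    raise RuntimeError("Feedback failed the reliability tests twice; nothing is posted.")
```

As in Algorithm 1, a reliability failure triggers a fresh pass through the whole pipeline rather than a retry of a single stage.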
We iteratively ran our agent on this test set, examined the generated feedback, and refined the prompts to optimize the results. This procedure ultimately led to prompts that produced high-quality feedback for all 50 reviews in the test set. + +Reliability testing: Inspired by [26], we also developed a suite of reliability tests designed to act as guardrails, ensuring the quality of our generated feedback. Reliability tests evaluate specific attributes of a model's output. The four reliability tests we developed ensured the feedback provided constructive suggestions, addressed the reviewer, did not simply restate what the reviewer wrote, and was formatted correctly. We provide the exact reliability tests we used and examples of feedback that would fail the reliability tests in Appendix B. We developed up to five test cases for each reliability test and refined the reliability test prompts until we passed all the test cases. To refine our Review Feedback Agent's pipeline and prompts, we passed our test set reviews through the validated reliability tests until we achieved a $100\%$ pass rate. + +Feedback was only posted to a review if it passed all our reliability tests; if it failed, we re-ran the entire pipeline a second time ( $T = 2$ ) to generate new feedback. Upon a second fail, we returned an error and did not post the feedback. Over $96\%$ of generated feedback for ICLR 2025 reviews passed all reliability tests. + +# 3 Results + +# 3.1 Impact of feedback on review updates and reviewer engagement + +First, we aimed to objectively measure how many reviewers updated their reviews after receiving feedback compared to those who did not receive feedback. This enabled us to assess how the feedback may have been associated with changes in various components of their review, such as length and scores. + +We conducted this ICLR experiment as a randomized controlled study by randomly splitting all reviews into one of two groups: not selected to receive feedback (control group) or selected to receive feedback (feedback group) - see Section 2 for more details. Note that the group selected to receive feedback includes the $7.9\%$ of reviews that were selected but did not actually receive feedback, mostly because AI deemed feedback not necessary there. This intent-to-treat definition of the feedback group enables us to conduct causal analysis but could dilute the actual effect of the feedback. + +Of all reviews in the feedback group, we further defined reviews that successfully received feedback as either being not updated or updated. A review is not updated if a reviewer did not edit their review after receiving feedback or if the edit distance between the initial and modified review was less than 5; this edit distance filtering accounted for minor updates such as fixing typos or modifying scores. Conversely, a review + +![](images/c390e294f465635e741d8f40fc61356f4edd26a8fd74e07132339f3a99ecf6e9.jpg) +A +Figure 3: (A) Feedback statistics. Among all ICLR 2025 reviews, 22,467 were randomly selected to receive feedback (feedback group), and 22,364 were randomly selected not to receive feedback (control group). Of those selected to receive feedback, 18,946 $(42.3\%)$ successfully received feedback, with $26.6\%$ of those reviewers updating their reviews. (B) Update rates. (Top) Most reviews were submitted 2-3 days before the review deadline (November 4, 2024). (Bottom) Reviewers were more likely to update their review if they submitted it early relative to the deadline. 
Reviewers who received feedback were much more likely to update their reviews than those in the control group, with a difference of approximately 17 percentage points. (C) Average change in review length (measured as number of words). Review length is measured only for the following sections: summary, strengths, weaknesses, and questions. The difference in review length between the control and feedback groups is statistically significant $(^{**}\mathrm{p} \leq 0.01)$, with selection to receive feedback increasing review length by an average of 14 words more than the control group (a $200\%$ larger increase). The difference is more pronounced between the not-updated and updated groups $(^{***}\mathrm{p} \leq 0.001)$.

![](images/e626483d5b51e3bf27cb3db70cce10e0b7472c4c1220b887075883a755b16eb6.jpg)
B

C
| | Control | Feedback | Not updated | Updated |
| --- | --- | --- | --- | --- |
| Average change in length | 7.0 | 21.0** (Δ +200%) | 2.1 | 80.3*** |
+ +is updated if a reviewer did edit their review after receiving feedback and the edit distance between the initial and modified review was greater than 5. + +Of the 18,946 reviews that successfully received feedback, 5,031 (26.6%) reviews were updated (Figure 3A). Out of the 22,364 reviews in the control group, only 2,103 (9.4%) were updated; here, we define updated for the control group as a reviewer updating at least one hour after posting (the time it takes for the feedback group to receive feedback) with an edit distance greater than 5. With an update rate difference of roughly 17 percentage points (Figure 3B), we can see that reviews that received feedback were much more likely to be updated than those that did not. + +In Figure 3B, we also see that reviewers who submitted early relative to the deadline (November 4, 2024) were more likely to update their review than those who submitted close to or after the deadline. This suggests that more organized reviewers, who may already be more engaged in the review process, were more likely to revise their reviews in response to feedback. While this will influence our analysis comparing the not updated and updated groups, we can be confident that the underlying distribution of the control and feedback groups is similar and not biased by factors such as reviewer organization because we conducted this as an RCT. Randomization helps mitigate such biases, making it possible to assess the causal impact of the feedback on the peer review process. + +Finally, we analyzed the change in review length (number of words in the summary, strengths, weaknesses, and questions sections) among the groups (Figure 3C). We compared the initial review length and the modified review length; we refer to modified reviews as the review at the end of the four-week review period before the rebuttal period began (only these modified reviews are made public to authors). We saw that review length, on average, increased across all groups. First, we observed that being selected to receive feedback caused the average review length to increase by about 14 words more than reviews that were not selected to receive feedback. Note that this effect size is deflated due to the substantial number of reviewers who received feedback but did not update their review, as well as the $7.9\%$ of reviews that were selected to receive feedback but did not actually get it. We also see that updating the review after receiving feedback is associated with a statistically significant increase in review length (80 words) compared to not updating the review (2 words). We can infer that reviewers who updated their reviews were editing them more consistently to incorporate more detail and nuance, explaining this large increase in length. In addition to feedback causing an increase in review length, we also found that a significantly higher percentage of reviewers who received feedback edited at least one of their scores (soundness, presentation, contribution, rating, and confidence) during the review period, with $8.1\%$ of them making edits compared to $7.5\%$ among the control group $(p \leq 0.05)$ . In Appendix C, we observe no significant difference in the average score changes between the feedback and control groups. + +# 3.2 Measuring how much feedback reviewers incorporate + +Of the reviewers that updated their review, we wanted to measure what proportion of them incorporated one or more pieces of feedback they were provided. 
This analysis helped us estimate how many reviewers found the feedback useful. + +We counted a piece of feedback as incorporated if the reviewer clearly integrated some part of the feedback into their modified review. To systematically carry out this analysis, we developed an LLM-based pipeline to run on all updated reviews (see Supplementary Figure S2A). We used the Claude Sonnet 3.5 model to evaluate whether each feedback item received by a reviewer was incorporated into their modified review. See Appendix D for our approach to validating this pipeline. Of the 5,031 reviews that reviewers updated, encompassing 18,322 total feedback items, $89\%$ of reviewers incorporated at least one piece of feedback. This represents $23.6\%$ of all reviewers who received feedback (Figure 4A). In total, we estimate that 12,222 feedback items were incorporated into revised reviews. We also examined the number of feedback items reviewers who updated their reviews received compared to how many feedback items they incorporated (Figure 4B). We see that when reviewers receive fewer feedback items, they are more likely to incorporate more (or even all) of the items. Overall, the average reviewer who updated their review incorporated $69.3\%$ of the feedback they received; in other words, given 3 pieces of feedback, the average reviewer who updated their review incorporated 2 of them. + +Below are a few examples of realincorporations reviewers made based on their feedback. We provide the initial review comment they posted, the feedback they received, and then their modified comment. + +![](images/39d45c4bf6501e803d5caa9078adf127fb44c19ce3e84bc278beda887ea50cc2.jpg) +A + +![](images/574ace002b44edf3e53c3c7f3c729f721aaf7f0ee5bad955cd6216180e7b5ca4.jpg) +B +Of 12,222 total feedback items incorporated: +Figure 4: (A) Overall incorporation statistics. Through our LLM-based incorporation analysis, we estimate that $23.6\%$ of reviewers who were given feedback incorporated at least one feedback item they were given. This means that $89\%$ of reviewers who updated their review after receiving feedback incorporated at least one item. (B) Feedback incorporation trends. Here, we illustrate the relationship between the number of feedback items reviewers who updated their review received and how many of those items they incorporated. In total, reviewers incorporated 12,222 feedback items. Notably, reviewers were more likely to incorporate feedback when given fewer items. + +# Incorporation example: Feedback that encourages specificity + +Original comment: Theorems 1, 2, 3 require the notion of interpolation, which is considered a strong assumption to have. + +Feedback: To make this critique more actionable, it would be helpful to elaborate on why the interpolation assumption is problematic in this context. Are there particular types of federated learning problems where this assumption is unlikely to hold? Providing concrete examples or suggestions for how the authors could relax or justify this assumption would give them clearer guidance on addressing this limitation. + +Revised comment: Theorems 1, 2, 3 require the notion of interpolation. Even though an explanation of regimes that satisfy this condition is provided, considering that there are previous works [1], [2] that extend beyond that setting, this assumption seems to be an avenue for future work in this field. More specifically, the initial FedProx algorithm [1] is analyzed in the general non-interpolated setting. 
In addition, the follow-up work regarding the FedExProx algorithm [2] considers in the main paper the interpolated regime. However, the authors provide additionally an illustration of the algorithm's behaviour in the non-interpolated setting (see Appendix F.3 in [2]). In that sense, it would be useful to provide some additional details on the behaviour of the algorithm in the non-interpolated setting or to comment on the main challenges in extending the current proof technique beyond the interpolation framework, offering in that way a more complete picture and direction for future research. + +# Incorporation example: Feedback that identifies content oversight + +Original reviewer comment: How is the performance on Llama? + +Feedback to the reviewer: The paper appears to provide results on Llama 2-7B in Table 3. Does this address your question? If not, could you clarify what specific aspects of performance on Llama you're interested in that aren't covered by the current results? + +Modified reviewer comment: How is the performance on Llama3 with the alpaca dataset? + +# Incorporation example: Feedback that makes review more actionable + +Original reviewer comment: Limited application focus on other visual tasks (e.g., object detection, segmentation) beyond classification. + +Feedback to the reviewer: To make this comment more actionable, consider asking the authors to discuss potential extensions or challenges in applying their approach to other visual tasks. For example: 'How might the visual memory approach be adapted for tasks like object detection or segmentation? What unique challenges would arise in those domains?' + +Modified reviewer comment: Limited application focus on other visual tasks (e.g., object detection, segmentation) beyond classification. How might the visual memory approach be adapted for tasks like object detection or segmentation? What unique challenges would arise in those domains? + +Finally, we wanted to assess whether these incorporated reviews were clearer, more specific, and more actionable for authors. To conduct this analysis, we asked two human AI researchers to conduct a blind preference evaluation between the initial and modified pre-rebuttal reviews. Specifically, we focused on reviews in the updated group that received 3-4 feedback items (the average number given) where the proportion of incorporated feedback exceeded 0.60. This threshold was chosen because the average incorporation rate was $67\%$ , and we aimed to assess whether an average updated review with incorporated feedback was perceived as an improvement. Human annotators preferred modified reviews $89\%$ of the time (out of 100 examples), indicating that reviewers who incorporated feedback consistently produced higher-quality reviews. + +# 3.3 Influence of feedback on rebuttals and decisions + +We next analyzed the impact of being selected to receive feedback on the rebuttal process and decision outcomes. The rebuttal period took place over three weeks between November 12 and December 4, 2024, and was a time when authors could respond to their reviewer's comments as they revised their papers. We examined how the feedback causally impacted different engagement measures during the rebuttal period. + +
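Table 1 below summarizes these measures. As an illustration of the underlying comparison (not our released analysis code), the group difference in, say, rebuttal length can be checked with a simple two-sample test; the function and variable names here are assumptions of the sketch.

```python
# Illustrative only: compares rebuttal lengths (in words) between the control and
# feedback groups with a two-sample t-test. The input lists are assumed to hold the
# text of one author rebuttal each; this is not our released analysis code.
from statistics import mean
from scipy import stats


def compare_lengths(control_texts: list[str], feedback_texts: list[str]) -> None:
    control = [len(t.split()) for t in control_texts]
    feedback = [len(t.split()) for t in feedback_texts]
    t, p = stats.ttest_ind(feedback, control, equal_var=False)  # Welch's t-test
    print(f"control mean = {mean(control):.1f} words, "
          f"feedback mean = {mean(feedback):.1f} words, p = {p:.2g}")
```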
| | Control | Feedback | Not updated | Updated |
| --- | --- | --- | --- | --- |
| Average length of author rebuttal | 807 | 855*** (Δ +6%) | 840 | 896*** |
| Average length of reviewer replies | 110 | 116*** (Δ +5.5%) | 115 | 129*** |
+ +Table 1: Average change in rebuttal and reply length (measured as number of words). We observe that being selected to receive feedback causally increased the length of author rebuttals by an average of 48 words $(6\%;^{**}\mathrm{p}\leq 0.001)$ for reviews written by reviewers who were selected to receive feedback, compared to those who were not. We also see that the average length of reviewer replies to author rebuttals is significantly longer among those who were selected to receive feedback, with an average increase of 6 words $(5.5\%;^{**}\mathrm{p}\leq 0.001)$ . + +In the first row of Table 1, we observed that authors posted rebuttals that were, on average, $6\%$ longer (48 words) to reviews written by reviewers who were selected to receive feedback, which is significantly longer than those posted to reviews in the control group. In other words, authors were generally more engaged when + +their reviewer was selected to receive feedback. This could be because the feedback led to clearer and more actionable reviews, allowing authors to more effectively address and respond to the reviewer's comments with more detailed rebuttals. In the second row of Table 1, we also saw that reviewers who were selected to receive feedback responded to these rebuttals with replies that were, on average, $5.5\%$ longer (6 words) than those who were not selected, again highlighting increased engagement among reviewers if they were in the feedback group. + +This increased engagement is reflected in the percentage of reviewers who edited one or more of their scores for a paper during the rebuttal period. We found that $31.7\%$ of reviewers who received feedback edited their scores, compared to $30.6\%$ of those who did not, consistent with receiving feedback being associated with greater reviewer-author engagement. Overall, these findings lead us to conclude that authors were better able to address their reviewers' original concerns during the rebuttal period if their reviewer was selected to receive feedback, leading to more engagement and satisfaction among both groups. + +Finally, we evaluated whether papers with reviews that were selected to receive feedback had a different acceptance rate than those that were not. We compared the acceptance rates of the control and feedback groups, defining the control group as all papers where no reviews were selected to receive feedback and the feedback group as those where at least one review was selected to receive feedback. While there was a slightly higher acceptance rate of $32.3\%$ among papers in the feedback group, compared to $30.8\%$ among the control group, this difference was not statistically significant. This indicates that while receiving feedback promoted more engaged and thorough discussions among reviewers and authors, it did not substantially change acceptance rates. + +# 3.4 Clustering analysis of the feedback comments + +![](images/e9b2135417f8a4a3154218044a289992001ca73745da1559da65cd735992daca.jpg) +A + +![](images/7c456f8feec7c678459b5c72845568ab8bcd9659db979be35d064f3fe484cfb5.jpg) +B + +![](images/4dc4fc56dbe55e0e8da3ad87e006978c1a53d8a96d528471693d75f4bb8980bb.jpg) +Figure 5: (A) Feedback clusters. We used an LLM to group all the feedback items we provided to reviewers into five distinct clusters based on the text. We found that nearly half of the feedback was directed at asking the reviewer to 'clarify methodological concerns to make their request specific and actionable.' 
The next most popular cluster was feedback asking the reviewer to 'clarify their request by adding specific analyses, baselines, or references.' (B) Incorporation rate by cluster. We measured the percentage of feedback items within each cluster that reviewers incorporated. Overall, $17.7\%$ of all feedback was incorporated. When examined by cluster, incorporation rates ranged from $14\%$ to $18\%$ , with no statistically significant differences observed. + +To gain more insights into what types of feedback were provided by the AI agent, we present a quantitative clustering analysis of the feedback generated. Of the 69,836 total feedback items the agent gave to reviewers, we characterized the feedback by generating distinct clusters. Inspired by [27, 28], we generated five clusters on 1000 randomly sampled feedback items using an LLM; we repeated this process twice to ensure robustness. Of the 10 candidate clusters, we prompted a second LLM to select five non-overlapping clusters, giving us the five clusters we present below. Finally, we prompted a third LLM to assign the feedback items to one of the five clusters so we could determine what proportion of feedback belonged to each cluster. We depict the five clusters in Figure 5A, as well as examples of feedback items that would fall into each cluster in Table 2. + +
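The three LLM stages of this clustering procedure can be sketched as follows (Table 2 then follows with examples from each cluster). This is a rough illustration under stated assumptions: `call_llm` and the prompt strings are placeholders rather than the prompts we actually used.

```python
# Illustrative sketch of the three-stage feedback clustering (Section 3.4).
# `call_llm` and the prompt strings are placeholders, not the prompts we used.
import json
import random


def call_llm(prompt: str) -> str:
    raise NotImplementedError  # wraps a request to the LLM


def cluster_feedback(feedback_items: list[str], n_clusters: int = 5) -> dict[str, list[str]]:
    sample = random.sample(feedback_items, k=min(1000, len(feedback_items)))

    # Stage 1: propose candidate cluster names from a random sample (run twice for robustness).
    candidates: list[str] = []
    for _ in range(2):
        candidates += json.loads(call_llm(
            f"Group these review-feedback items into {n_clusters} named clusters. "
            f"Return a JSON list of cluster names.\n\n{json.dumps(sample)}"))

    # Stage 2: a second LLM call picks n non-overlapping clusters from the candidates.
    clusters = json.loads(call_llm(
        f"From these candidate cluster names, pick {n_clusters} that do not overlap. "
        f"Return a JSON list.\n\n{json.dumps(candidates)}"))

    # Stage 3: a third LLM call assigns every feedback item to one of the chosen clusters.
    assignments: dict[str, list[str]] = {c: [] for c in clusters}
    for item in feedback_items:
        label = call_llm(
            f"Assign this feedback to exactly one of {clusters}. "
            f"Reply with the cluster name only.\n\n{item}").strip()
        assignments.setdefault(label, []).append(item)
    return assignments
```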
| Cluster | Reviewer comment | Feedback to the reviewer |
| --- | --- | --- |
| Clarify methodological concerns to make request specific and actionable | The experimental setup and tool flow used to conduct the experiments should be described in more detail. | To make this feedback more actionable, it would be helpful to specify what aspects of the experimental setup and tool flow you feel are missing. For example, are you looking for more details on the hardware specifications used for CPU/GPU/FPGA experiments, software frameworks and versions, training hyperparameters, or quantization settings? |
| Clarify request by adding specific analyses, baselines, or references | All the experiment examples are numerical and toy, and there is a large mismatch with real physical engineering systems. Therefore, I do not think they are convincing. | To make this critique more constructive, you could suggest specific real-world physical systems or benchmarks that would provide a more convincing evaluation of the method. This would give the authors concrete ideas for improving their experimental validation. |
| Clarify suggestions to visuals or format | A few suggestions, Fig. 2 is very hard to understand. Is there another way to convey the same information which is easier to read? | To make this feedback more constructive, try to articulate what specific aspects of Figure 2 are difficult to understand. You could suggest alternative visualization formats or specific elements that need clarification. This would help the authors know how to improve the figure. |
| Request authors expand on methodological limitations or broader implications | CSP can only support binary labels. | To make this comment more constructive, you could ask the authors to discuss the implications of this limitation for real-world applications and whether they have considered any extensions to support multi-class problems. |
| Clarify desired improvements to theoretical analysis or proofs | How standard are the assumptions (Assumptions C.1 and C.2) required for proving Theorems 4.1 and 4.2? It would be helpful to see more justification regarding their validity (e.g. other papers building based upon similar assumptions). | This is an excellent question that probes the theoretical foundations of the paper. To make it more actionable, you could ask the authors to provide specific examples of other works in the field that rely on similar assumptions, or to explain how these assumptions compare to those typically used in related theoretical analyses. This would help contextualize the theoretical contributions within the broader literature. |
+ +Table 2: Examples of AI-generated feedback that belong to each of the five main clusters. We also provide the original review comment that triggered the generation of the feedback. + +These clusters indicate that the vast majority of feedback provided was addressed towards vague review comments and aimed to make them more specific, actionable, and justified. We saw that the agent rarely chose to comment on content misunderstandings, in large part because it had to be absolutely certain there was an error and provide a direct quote from the paper highlighting the mistake as we did not tolerate any hallucinations. Therefore, we saw that the model would err on the side of caution and not provide many comments related to that category. + +We also sought to measure the percentage of feedback items within each cluster that were incorporated by reviewers, as shown in Figure 5B. Overall, out of the 69,836 feedback items given, we found that $17.7\%$ of all feedback was incorporated. On a cluster basis, we found that the 'clarify request by adding specific analyses, baselines, or references' and 'clarify desired improvements to theoretical analysis or proofs' clusters had the highest incorporation rate at $18\%$ . The 'clarify suggestions to visuals or format' cluster had the + +lowest incorporation rate at $14\%$ . Overall, we do not see statistically significant differences in incorporation rates among the clusters, implying that reviewers did not find certain categories of feedback to be more or less useful than others. + +# 4 Related Works + +Due to their extensive capabilities, LLMs are being used across every stage of the peer review process. Reviewers increasingly use LLMs to assist in drafting peer reviews [15, 29, 30]. An estimated $17.5\%$ of authors of Computer Science abstracts on arXiv [31] and $10.6\%$ of reviewers at ICLR 2024 [16] used LLMs for writing assistance. Other studies have shown the potential of LLMs to make the entire review pipeline more efficient across various stages [32, 33, 34, 35] such as writing manuscripts [36], initial quality control [37, 38, 27], and even providing AI-generated instructions for how to write reviews [39]. + +As peer review workloads continue to increase, LLMs present an opportunity to alleviate some of the burden on human reviewers by providing reviews of submitted manuscripts. In a prospective survey study, 308 researchers from 110 institutions received GPT-4-generated feedback on their papers. Of these, $57.4\%$ found the feedback helpful, and $82.4\%$ felt it was more useful than the feedback provided by at least some human reviewers [12]. Building off of this work, [40] proposed a multi-agent review generation system that improved the specificity and helpfulness of feedback provided compared to GPT-4, reducing the rate of generic comments from $60\%$ to $29\%$ . + +Furthermore, LLMs offer an efficient and possibly less biased alternative to human evaluations; [41] found that human evaluators of peer reviews were highly susceptible to bias from review length and paper score, as there were high levels of subjectivity among reviewers. These findings suggest that integrating LLMs into the review evaluation process could standardize assessments and reduce inconsistencies. As LLM-based tools continue to evolve, they hold the potential to improve both the speed and quality of manuscript evaluations. Our experiment is the first to demonstrate how LLMs can improve the peer review process on a large scale, highlighting their practical benefits. 
+ +However, despite these advancements, no prior studies had specifically examined how LLMs could be used to provide feedback on peer reviews in the areas we focused on in our experiment. A study released after our ICLR experiment, however, introduced a benchmark to identify toxicity in peer reviews [42]. The authors identified four categories of toxic comments: using emotive or sarcastic language, vague or overly critical feedback, personal attacks, and excessive negativity. These categories align closely with the ones we chose for our agent to provide feedback on. The authors benchmarked several LLMs for detecting toxicity and tested their ability to revise toxic sentences, finding that human evaluators preferred $80\%$ of these revisions. In future iterations of our Review Feedback Agent, this benchmark could offer a valuable tool for testing our pipeline's ability to detect toxicity and offer constructive feedback. + +# 5 Discussion + +Our research demonstrates the significant potential of LLM-based systems to enhance peer review quality at scale. By providing targeted feedback to reviewers at ICLR 2025, we observed meaningful improvements in review specificity, engagement, and actionability. We saw that $27\%$ of reviewers updated their reviews, and an overwhelming majority of those who made updates incorporated at least one piece of feedback into their modifications. Blinded AI researchers found the updated reviews to be consistently more clear and informative. Furthermore, feedback intervention led to increased engagement throughout the review process, with longer reviews, rebuttals, and reviewer responses, suggesting more involved discussions between authors and reviewers. + +We designed the AI feedback system to enhance reviews while ensuring human reviewers retain complete control. First, the AI-generated feedback was purely optional, and reviewers could decide whether to incorporate it or not; by default, they could opt out by ignoring the feedback. Second, human reviewers had full control over the final review and the scores visible to the authors. To reduce the risk of hallucination, the AI feedback had to pass several rigorous reliability tests before being shared with reviewers. Finally, no personal or identifiable information about reviewers or authors was disclosed to the agent. An IRB review deemed the system to be low risk. + +Going forward, there are several directions to further improve the Review Feedback Agent. Our feedback categories focused on three main areas (improving specificity, addressing misunderstandings, and ensuring professionalism). While these categories were derived from reviewer guides and previous studies and encompass the majority of author complaints, they may not capture all aspects of review quality. Expanding to other categories would be helpful. Additionally, it would be interesting to explore the use of reasoning models to generate more nuanced feedback for complex issues in reviews. Finally, the concept of developing reliability tests for LLMs is an evolving field, with new studies emerging after our experiment [43, 44], and we hope to incorporate ideas from these recent works to improve the robustness of our framework. Ultimately, we expect that running this agent at future AI conferences across a diverse range of research topics will improve its robustness and effectiveness. + +CS conferences have long leveraged machine learning to enhance their peer review processes. 
One early example is the Toronto Paper Matching algorithm, which was used in NIPS 2010 to match papers with reviewers and has since been deployed by over 50 conferences [45]. However, the impact of many of these earlier applications of machine learning has not been rigorously quantified. To address this gap, we were motivated to conduct this randomized controlled study to rigorously evaluate the effects of review feedback before broader deployment. Our findings show that by striving to make reviews more informative for authors, the Review Feedback Agent has the potential to enhance the overall quality of scientific communication. As LLM capabilities continue to advance, we anticipate even more advanced systems that can provide tailored feedback to reviewers, ultimately benefiting the entire scientific community through improved peer review. + +# Acknowledgements + +We would like to thank Celeste Martínez and Carlos Mondragon Chapa at OpenReview for their help in integrating our agent into the OpenReview interface. We would also like to thank Alex Tamkin and Anthropic for helping us increase our rate limits. Finally, we would like to thank members of the Zou group for their support and comments on this work. + +# Author Contributions + +NT, MY, JS, and JZ designed, developed, and deployed the Review Feedback Agent, conducted analyses, and wrote the paper. AG, NP, FS, RY, and CV are program chairs of ICLR 2025 and provided guidance on the feedback study and analysis. + +# References + +[1] Bruce Alberts, Brooks Hanson, and Katrina L. Kelner. Editorial: Reviewing peer review. Science, 321(5885):15-15, 2008. +[2] Jacalyn Kelly, Tara Sadeghieh, and Khosrow Adeli. Peer review in scientific publications: benefits, critiques, & a survival guide. *Ejifcc*, 25(3):227, 2014. +[3] Publons. Global state of peer review 2018, 2018. +[4] Ariful Azad and Afeefa Banu. Publication trends in artificial intelligence conferences: The rise of super prolific authors, 2024. +[5] Alison McCook. Is peer review broken? submissions are up, reviewers are overtaxed, and authors are lodging complaint after complaint about the process at top-tier journals. what's wrong with peer review?, 2006. +[6] ICLR. Iclr 2024 press release, 2024. +[7] Anna Rogers and Isabelle Augenstein. What can we do to improve peer review in NLP? In Trevor Cohn, Yulan He, and Yang Liu, editors, Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1256–1262, Online, November 2020. Association for Computational Linguistics. + +[8] Anna Rogers, Marzena Karpinska, Jordan Boyd-Graber, and Naoaki Okazaki. Program chairs' report on peer review at acl 2023. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages x1-lxxv, 2023. +[9] Martijn Arns. Open access is tiring out peer reviewers. Nature, 515:467, 2014. +[10] Corinna Cortes and Neil D. Lawrence. Inconsistency in conference peer review: Revisiting the 2014 neurips experiment, 2021. +[11] Anthropic. Claude 3.5 sonnet, 2024. +[12] Weixin Liang, Yuhui Zhang, Hancheng Cao, Binglu Wang, Daisy Yi Ding, Xinyu Yang, Kailas Vodra-halli, Siyu He, Daniel Scott Smith, Yian Yin, et al. Can large language models provide useful feedback on research papers? a large-scale empirical analysis. NEJM AI, 1(8):AIoa2400196, 2024. +[13] Mert Yuksekgonul, Federico Bianchi, Joseph Boen, Sheng Liu, Zhi Huang, Carlos Guestrin, and James Zou. Textgrad: Automatic "differentiation" via text, 2024. 
+[14] Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback, 2023. +[15] Mohammad Hosseini and Serge P J M Horbach. Fighting reviewer fatigue or amplifying bias? considerations and recommendations for use of chatgpt and other large language models in scholarly peer review. Research Integrity and Peer Review, 2023. +[16] Weixin Liang, Zachary Izzo, Yaohui Zhang, Haley Lepp, Hancheng Cao, Xuandong Zhao, Lingjiao Chen, Haotian Ye, Sheng Liu, Zhi Huang, Daniel A. McFarland, and James Y. Zou. Monitoring ai-modified content at scale: A case study on the impact of chatgpt on ai conference peer reviews, 2024. +[17] Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, et al. Siren's song in the ai ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219, 2023. +[18] Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. Instruction-following evaluation for large language models. arXiv preprint arXiv:2311.07911, 2023. +[19] ICML 2023 program committee. Icml 2023 reviewer tutorial, 2023. +[20] ICML 2022 Program Chairs. How to be a good reviewer? reviewer tutorial for icml 2022, 2022. +[21] ACL PC Chairs. Last minute reviewing advice, 2017. +[22] Matias Valdenegro. Lxcv @ cvpr 2021 reviewer mentoring program: And how to write good reviews, 2021. +[23] Isabelle Augenstein Anna Rogers. Arr reviewer guidelines, 2021. +[24] Nyssa J Silbiger and Amber D Stubler. Unprofessional peer reviews disproportionately harm underrepresented groups in stem. PeerJ, 7:e8247, 2019. +[25] Mathieu Fenniak, Matthew Stamy, pubpub zz, Martin Thoma, Matthew Peveler, exiledkingcc, and pypdf Contributors. The pypdf library, 2024. +[26] Marco Tulio Ribeiro and Scott Lundberg. Testing language models (and prompts) like we test software, 2023. + +[27] Alexander Goldberg, Ihsan Ullah, Thanh Gia Hieu Khuong, Benedictus Kent Rachmat, Zhen Xu, Isabelle Guyon, and Nihar B. Shah. Usefulness of llms as an author checklist assistant for scientific papers: Neurips'24 experiment, 2024. +[28] Alex Tamkin, Miles McCain, Kunal Handa, Esin Durmus, Liane Lovitt, Ankur Rathi, Saffron Huang, Alfred Mountfield, Jerry Hong, Stuart Ritchie, Michael Stern, Brian Clarke, Landon Goldberg, Theodore R. Sumers, Jared Mueller, William McEachen, Wes Mitchell, Shan Carter, Jack Clark, Jared Kaplan, and Deep Ganguli. Clio: Privacy-preserving insights into real-world ai use, 2024. +[29] Ryan Liu and Nihar B. Shah. Reviewergpt? an exploratory study on using large language models for paper reviewing, 2023. +[30] Som Biswas, Dushyant Dobaria, and Harris L. Cohen. Chatgpt and the future of journal reviews: A feasibility study. The Yale Journal of Biology and Medicine, 96(3):415-420, 2023. +[31] Weixin Liang, Yaohui Zhang, Zhengxuan Wu, Haley Lepp, Wenlong Ji, Xuandong Zhao, Hancheng Cao, Sheng Liu, Siyu He, Zhi Huang, Diyi Yang, Christopher Potts, Christopher D Manning, and James Y. Zou. Mapping the increasing use of llms in scientific papers, 2024. +[32] Nihar B. Shah. Challenges, experiments, and computational solutions in peer review. Commun. ACM, 65(6):76-87, May 2022. +[33] Simon Price and Peter A. Flach. 
Computational support for academic peer review: a perspective from artificial intelligence. *Commun. ACM*, 60(3):70-79, February 2017. +[34] Atreyi Kankanhalli. Peer review in the age of generative ai. Journal of the Association for Information Systems, 25(1), 2024. +[35] Ilia Kuznetsov, Osama Mohammed Afzal, Koen Dercksen, Nils Dycke, Alexander Goldberg, Tom Hope, Dirk Hovy, Jonathan K. Kummerfeld, Anne Lauscher, Kevin Leyton-Brown, Sheng Lu, Mausam, Margot Mieskes, Aurélie Néveol, Danish Pruthi, Lizhen Qu, Roy Schwartz, Noah A. Smith, Thamar Solorio, Jingyan Wang, Xiaodan Zhu, Anna Rogers, Nihar B. Shah, and Iryna Gurevych. What can natural language processing do for peer review?, 2024. +[36] Tiffany I Leung, Taiane de Azevedo Cardoso, Amaryllis Mavragani, and Gunther Eysenbach. Best practices for using ai tools as an author, peer reviewer, or editor. J Med Internet Res, 25:e51584, Aug 2023. +[37] Alessandro Checco, Lorenzo Bracciale, Pierpaolo Loreti, Stephen Pinfield, and Giuseppe Bianchi. AI-assisted peer review. Humanities and Social Sciences Communications, 2021. +[38] Kayvan Kousha and Mike Thelwall. Artificial intelligence to support publishing and peer review: A summary and review. Learned Publishing, 37(1):4-12, 2024. +[39] Xiaotian Su, Thiemo Wambsgangss, Roman Rietsche, Seyed Parsa Neshaei, and Tanja Käser. Reviewwriter: AI-generated instructions for peer review writing. In Ekaterina Kochmar, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Nitin Madnani, Anaïs Tack, Victoria Yaneva, Zheng Yuan, and Torsten Zesch, editors, Proceedings of the 18th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2023), pages 57–71, Toronto, Canada, July 2023. Association for Computational Linguistics. +[40] Mike D'Arcy, Tom Hope, Larry Birnbaum, and Doug Downey. Marg: Multi-agent review generation for scientific papers, 2024. +[41] Alexander Goldberg, Ivan Stelmakh, Kyunghyun Cho, Alice Oh, Alekh Agarwal, Danielle Belgrave, and Nihar B. Shah. Peer reviews of peer reviews: A randomized controlled trial and other experiments, 2024. + +[42] Man Luo, Bradley Peterson, Rafael Gan, Hari Ramalingame, Navya Gangrade, Ariadne Dimarogona, Imon Banerjee, and Phillip Howard. Benchmark on peer review toxic detection: A challenging task with a new dataset, 2025. +[43] Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. Lmunit: Fine-grained evaluation with natural language unit tests, 2024. +[44] Archiki Prasad, Elias Stengel-Eskin, Justin Chih-Yao Chen, Zaid Khan, and Mohit Bansal. Learning to generate unit tests for automated debugging, 2025. +[45] Laurent Charlin, Richard S Zemel, and Craig Boutilier. A framework for optimizing paper matching In UAI, volume 11, pages 86-95, 2011. + +# Appendices + +# A Agent Prompts + +We manually fine-tuned the following prompts for the LLMs in the Review Feedback Agent. We provide the prompts below: + +# Actor Prompt + +Here is the paper: {paper} . Here is the peer review: {review} . + +# Actor System Prompt + +You are given a peer review of a machine learning paper submitted to a top-tier ML conference on OpenReview. Your task is to provide constructive feedback to the reviewer so that it becomes a high-quality review. You will do this by evaluating the review against a checklist and providing specific feedback about where the review fails. + +Here are step-by-step instructions: + +1. 
Read the text of the review and the paper about which the review was written. +2. Evaluate every comment in the review: + +- Focus on comments related to weaknesses of the paper or questions the reviewer has. Ignore any comments that are summaries of the paper or that discuss strengths of the paper. +- Consider the reviewer's comments in their entirety. Make sure you read all sentences related to one thought, since the full context of the reviewer's comment is very important. +- Consider the reviewer's comments in their entirety. Make sure you read all sentences related to one thought, since the full context of the reviewer's comment is very important. +- For each comment, evaluate it against the following checklist. Follow the examples for how to respond. Importantly, you should be as helpful as possible. Do no ask superficial questions or make superficial remarks, think deeply and exhibit your understanding. +- Most reviewer comments are already sufficiently clear and actionable. Only focus on the ones that clearly fail the checklist items below. +- Checklist: +(a) Check if the reviewer requests something obviously present in the paper. Only respond if certain of the reviewer's error. If so, politely pose a question to the reviewer with + +something like "Does the following answer your question...?" quote the relevant paper section verbatim using tags. Use only exact quotes and do not comment if uncertain. + +The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment: + +- Example 1: + +* Reviewer comment: In Figure 4, the efficiency experiments have no results for Transformer models, which is a key limitation of the paper. +* Feedback to the reviewer: Does Figure 5 of the paper answer your question? In particular: In Transformers, the proposed technique provides $25\%$ relative improvement in wall-clock time (Figure 5) . + +- Example 2: + +* Reviewer comment: The authors propose a new deep learning model for predicting protein-protein interactions but don't explain how they address the class imbalance in PPI datasets. Most protein pairs don't interact, creating an imbalance between positive and negative samples. It's unclear how the model balances sensitivity and specificity, which is important for systems biology applications. +* Feedback to the reviewer: Does section 3.3 of the paper address your concern? Specifically, the following passage: To address the class imbalance in PPI datasets, where non-interacting pairs are far more common, we employ a "Balanced Interaction Learning" (BIL) approach. This involves using a focal loss function to reduce the influence of easy negatives, balanced minibatch sampling to ensure a mix of positive and negative samples, and a two-stage training process with pre-training on a balanced subset before fine-tuning on the full dataset . + +- Example 3: + +* Reviewer comment: Lack of theoretical analysis of the communication complexity of the proposed method. In distributed optimization, communication complexity is crucial for minimizing inter-node communication to enhance system efficiency and reduce communication costs. +* Feedback to the reviewer: The paper appears to provide a theoretical analysis of communication complexity. Specifically, Theorem 3.6 states an $\mathrm{O}(\sqrt{\kappa_{max}}\log (1 / \epsilon))$ communication complexity bound. Does this address your concern? Are there specific aspects of communication complexity analysis you feel are missing? 
+ +(b) Look for any vague or unjustified claims in the review. This results in points that are not actionable or harder to respond to. For such cases, we would like to nudge the reviewer to provide more specific details and justify their claim. + +First, let us define what it means for a comment to be actionable and specific enough. + +There are a few pieces of criteria we will use to determine this: + +i. The review comment specifies the section, paragraph, figure, or table where the issue occurs. +ii. The issue or concern in the review comment is explicitly stated, avoiding vague language. +iii. The comment explains why the identified issue is problematic and needs addressing. + +iv. The reviewer provides concrete examples: + +A. At least one example of what they find unclear or problematic. +B. At least one example or suggestion of what would address their concern (e.g., specific metrics, experiments, or changes). + +Do NOT nitpick. Most comments are already specific and actionable, and we do not want to provide feedback on those. We do NOT want to annoy reviewers with unnecessary feedback! + +The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment: + +# - Example 1: + +* Reviewer comment: It appears that the linear mode connectivity results may be somewhat brittle. +* Feedback to the reviewer: Can you elaborate on why you see the results as brittle? It may also be helpful to describe in further detail how the authors can address your concern. For example, if you believe additional experiments or theoretical analyses are needed, it may be helpful to explicitly say so. + +# - Example 2: + +* Reviewer comment: The paper writing is not fluent enough and needs polishing to be easier to follow. +* Feedback to the reviewer: It would be helpful if you could provide specific examples of sections or sentences that are difficult to follow. This would give the authors more actionable feedback. + +# - Example 3: + +* Reviewer comment: In the proposed method, an additional optimization problem is required to solve every iteration, i.e., Eq. (11). Thus the proposed method seems inefficient since it is a nested-loop algorithm. +* Feedback to the reviewer: Your concern about efficiency is valid, but it may be helpful to describe in further detail how the authors might address your concern. For example, you could ask about the computational complexity of solving Eq. (11) compared to the overall algorithm, or request empirical runtime comparisons to existing methods. This could help the authors address the efficiency concern more concretely. + +# - Example 4: + +* Reviewer comment: The paper presents a limited number of baseline methods, and they are relatively outdated (between 2019 and 2021). Additionally, the paper lacks analytical experiments to substantiate that the proposed method has learned superior textual structural information. +* Feedback to the reviewer: To strengthen this critique, consider suggesting specific, more recent baselines that you believe should be included. Also, providing examples of analytical experiments that could effectively demonstrate superior learning of textual structural information would make this feedback more actionable for the authors. + +# - Example 5: + +* Reviewer comment: One of the assumptions of this paper is that "most GNNs perform better on homophilic graphs". I personally do not agree with it. 
A part of the heterophilic graphs are easy to fit, e.g., Wisconsin with $90 + \%$ accuracy, and some homophilic graphs are challenging. The difficulties of node classification on different datasets are not only related to the graph (label) homophily, but also related to the node features, and many other factors. +* Feedback to the reviewer: Your point is helpful, but it would be more actionable to ask the authors to provide evidence supporting their assumption, rather than simply disagreeing. Consider asking for specific examples or citations that demonstrate GNNs performing better on homophilic graphs. + +# - Example 6: + +* Reviewer comment: The numbers in table 1 are not described. +* Feedback to the reviewer: It would be helpful to specify what aspects of the numbers in Table 1 need more description. Are you referring to the meaning of the values, their units, or something else? This would help the authors provide a more targeted response. + +The following are examples where the reviewer's comments are already specific and, most importantly, actionable, so you should not give any feedback: + +- Reviewer comment: The paper claims occupancy is increased on Page 6 but it was unclear: (i) what definition of occupancy is being used (GPU resources could mean many things and occupancy often just refers to number of warps that can concurrently run versus max number supported by hardware); and (ii) whether any measurement has been made to confirm the claimed improvement (e.g., using NVIDIA Parallel Nsight or similar approaches for collecting performance counters). +- Reviewer comment: Second paragraph under "Semantic similarity": I felt lots of details were missing here to better understand the quality of phrases, and the feasibility of the proposed approach. The Appendix A do not provide all necessary details. Is this done on the pretraining corpus? What trivial constituents were dropped out and why (some examples would help)? +- Reviewer comment: Some works like Saycan and RT2 also consider the match of the environment and the agent ability. Key differences between the proposed method and those existing works need to be more carefully discussed. +- Reviewer comment: The problem studied, and the techniques used, are closely related to Lipshitz bandits [2], pricing [3] and bilateral trade [1]. Please consider a more thorough comparison with the already known results and techniques there. +- Reviewer comment: In Table 3, FlashFFTConv outperforms torch.fft by up to $8.7\mathrm{x}$ , while the speedup is about $2\mathrm{x}$ without the domain-specific optimizations. Does it mean the major speedup comes from the domain-specific optimizations instead of the FlashFFTConv algorithm? Could the authors conduct this ablation study (with and without the domain-specific optimizations) in other experiments? +- Reviewer comment: Then in Section 4.2, the authors propose to give the actor past actions to help it infer the state at the current step. I don't understand why is this not done by default. In my understanding, DOMDPs are POMDPs and in POMDPs, past actions and observations should always be given to the policy for optimal control. I don't see how this is an innovation. + +If a reviewer asks a question that is already clear, you do not need to give feedback on it or rephrase it. Questions need to be clear and specific, but they do not necessarily need to be actionable as they represent a reviewer's confusion. To be precise, in most cases if a comment ends in '?' 
you should ONLY give feedback if the question itself is unclear. + +Here are some examples of reviewer comments that are clear and specific, and therefore do not need feedback: + +- Reviewer comment: 4) In Figure 6, Spearman rank correlation scores for HCMs are reported. As far as I know, Spearman rank correlation calculates the correlation between two variables. How was the correlation computed from multiple runs in this case? +- Reviewer comment: While there are detailed information about training procedure, not much is written about the actual inference step. For instance, how many samples for each prototype are required for reliable performance? + +(c) If the reviewer claims the paper lacks novelty, ensure they specify why, including references to similar work. If they haven't, we would like to nudge the reviewer to + +justify the claim, by prompting them to provide the most relevant references, the relationships, and specifying similarities or differences. + +The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment: + +# - Example 1: + +* Reviewer comment: The paper's novelty is limited considering the ICLR standards. +* Feedback to the reviewer: It would be really helpful to the authors if you consider discussing the reasons for why the novelty is limited, and specify what ICLR standards are in this context. In particular, it would be very helpful if you give examples of the closest papers, their similarities, and differences with the methods or results in the current paper. + +# - Example 2: + +* Reviewer comment: The novelty of this work is not clear from the conclusion and experiments now. +* Feedback to the reviewer: To make this feedback more actionable, it would be helpful to specify which aspects of novelty are unclear or missing. Are there particular claims or contributions that need more justification? Providing concrete suggestions for how the authors could better highlight the novelty would give them clearer guidance. + +# - Example 3: + +* Reviewer comment: The proposed method is not innovative enough. I'm not an expert in this field, so I'm not sure about it. +* Feedback to the reviewer: It would be helpful if you could elaborate on why you think the method may not be innovative enough, even if you're not an expert. Are there specific aspects that seem similar to existing work? If you're uncertain about the novelty, it's best to phrase this as a question or area for clarification rather than a definitive weakness. For example, you could ask the authors to further explain how their approach differs from or improves upon existing methods for training vision-language models for satellite imagery. + +The following are examples where the reviewer's discussion of novelty is already detailed and actionable as written, so you should not give any feedback: + +- Reviewer comment: DASHA is a mash-up between MARINA and existing distributed nonconvex optimization methods. Other than the fact that three variants of DASHA get rid of the uncompressed synchronization in MARINA, this reviewer could not pinpoint a difference between MARINA and DASHA. As such, the main novelty of this work seems to be in terms of theoretical analysis of MARINA when the uncompressed synchronization step is removed. 
The authors could have done a better job of clarifying where does this novelty lie in the analysis (e.g., pinpointing the key analytical approaches in the lemma that helped improve the analysis) +- Reviewer comment: I'm not sure the paper has sufficient novelty to be published in the top-tier conference since the proposed method only goes one step further from Task Arithmetic [1] and TIES-MERGING [2] by incorporating trainable weights for task vectors. The concept seems thin to support an entire paper, with only one page (page 6) dedicated to the novel part. + +(d) Identify any personal attacks or inappropriate remarks made by the reviewer. This can be about the personality, the knowledge, or the experience of the authors. For example, they call the work "incompetent" without justifying why. For this case, we would like to kindly warn the reviewer about their comment and politely suggest they revise their language. + +The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment: + +- Example 1: + +* Reviewer comment: The authors clearly do not live in the real world and do not care about people or downstream effects of their research. +* Feedback to the reviewer: We kindly suggest you revise this comment, as it includes remarks about the personalities or intents of the authors. + +- Example 2: + +* Reviewer comment: This paper is embarrassing, and you are clearly not fit to be in research. +* Feedback to the reviewer: We appreciate your review, but kindly request that you focus your comments on the specific content and methodology of the paper rather than making personal remarks about the authors. + +- Example 3: + +* Reviewer comment: This MC-IS method for estimating the score will NEVER work well in high dimensions due to variance and thus why works such as [1,2,3,4] which are clearly aware of this formulation (as they either state it in their appendices or use it for subsequent calculation) pursue an optimization alternative to estimating the drift. +* Feedback to the reviewer: Consider revising this comment to avoid absolute statements like "NEVER". Instead, you could phrase it as a concern about scalability to high dimensions, and ask the authors to address this limitation or provide evidence that it can work in higher dimensions. + +3. Provide feedback: + +- For each comment that fails according to the checklist, write concise feedback in the following format: + +- Comment: the verbatim comment of interest +- Feedback: your concise feedback + +- If you do not identify any issues with a comment, do not include it in your feedback list. +- If you find no issues in the review at all, respond with: 'Thanks for your hard work!' + +# Remember: + +- Be concise, limiting your feedback for each comment to 1-2 sentences. +- Do not summarize your feedback at the end or include a preamble at the beginning. +- Do not repeat anything the reviewer already included in their review, and do not praise anything the reviewer wrote as we want to provide constructive feedback. +- Your feedback will be sent to reviewers. Do not mention that you are using a checklist or guidelines. +- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer. +- Do not provide feedback to any comments that mention a score or rating. You do not care about the reviewer's score or rating for this paper. +- Do not provide feedback to any comments that discuss typos. 
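
For illustration only, the Actor's bullet output specified above can be read back into structured comment-feedback pairs with a small parser along the following lines. This helper is not part of the deployed system; the function, class, and field names here are ours.

```python
import re
from dataclasses import dataclass


@dataclass
class FeedbackPair:
    comment: str   # verbatim reviewer comment the feedback refers to
    feedback: str  # feedback addressed to the reviewer


def parse_actor_output(text: str) -> list[FeedbackPair]:
    """Parse '- Comment: ...' / '- Feedback: ...' bullets into structured pairs.

    Illustrative helper only; the deployed pipeline may represent its
    intermediate outputs differently.
    """
    pattern = re.compile(
        r"-\s*Comment:\s*(?P<comment>.+?)\s*-\s*Feedback:\s*(?P<feedback>.+?)"
        r"(?=\n-\s*Comment:|\Z)",
        re.DOTALL,
    )
    return [
        FeedbackPair(m.group("comment").strip(), m.group("feedback").strip())
        for m in pattern.finditer(text)
    ]


if __name__ == "__main__":
    sample = (
        "- Comment: The paper's novelty is limited.\n"
        "- Feedback: It would help to name the closest prior work and state "
        "the key similarities and differences.\n"
    )
    for pair in parse_actor_output(sample):
        print(pair.comment, "->", pair.feedback)
```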
+ +# Aggregator Prompt + +Here is the paper: {paper} . + +Here are the lists of feedback: {feedbacks} . + +Here is the peer review: {review} . + +# Aggregator System Prompt + +You will be given multiple lists of feedback about a peer review of a machine learning paper submitted to a top-tier ML conference. The aim of the feedback is to guide a reviewer to make the review high-quality. Your task is to aggregate the lists of feedback into one list. + +Here are the guidelines that were followed to generate the feedback lists originally: {ACTOR_SYSTEM_CHART} + +Here are step-by-step instructions: + +1. Read the multiple feedback lists provided for that review, the text of the review, and the paper about which the review was written. +2. For all feedback lists, aggregate them into one list with the best comment-feedback pairs from each list: + +- For each comment-feedback pair in the multiple lists that are similar, determine which provides the best feedback and keep only that pair. +- If there are unique comment-feedback pairs in the multiple lists, critically determine if it is an essential piece of feedback needed to improve the review. If it is unnecessary or redundant, remove the comment-feedback pair. +- You should end up with one feedback list that has no repeated comments from the review and that is high quality. +- Return the feedback list in the format you received it in, where the pairs are formatted as: + +- Comment: {{the verbatim comment of interest}} +- Feedback: {{your concise feedback}} + +# Critic Prompt + +Here is the paper: {paper} . + +Here is the feedback: {feedback} . + +Here is the peer review: {review} . + +Remember: + +- You are a critic that will help reviewers improve their comments and reviews. Your valuable feedback will help improve their review. +- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer. + +# Critic System Prompt + +You are a critic that will help reviewers improve their reviews. You are given a list of feedback to the reviewer comments of a machine learning paper submitted to a top-tier ML conference on OpenReview. The aim of the feedback is to guide a reviewer to improve their comments and re + +view as a whole. Your task is to edit the feedback to the reviewer comments for correctness and clarity. + +Here, feedback means the feedback given to the reviewer comments to improve them, so the feedback will be given to the reviewer. + +Here are the guidelines that were followed to generate the feedback to the reviewer comments originally: {ACTOR_SYSTEM_PROMPT} . You should keep in mind to adhere to the above guidelines. + +Here are step-by-step instructions: + +1. Read the feedback list provided for reviewer comments, the full text of the review itself, and the paper about which the review was written. +2. Evaluate every piece of feedback in the feedback list: + +- For each feedback item, it is imperative that you evaluate the correctness of the feedback. If there is a quote in the feedback, ensure that the quote appears verbatim in the paper. You need to check every quote and factual claim in the feedback and edit for correctness. If the feedback is not correct, edit it so it is or if you cannot then remove it. +- For each feedback item, evaluate if it is clear. You should make sure it would not confuse or frustrate the reviewer who reads it. + +3. Remove comment-feedback pairs that are too nitpicky, unnecessary, or superficial. 
Also remove comment-feedback pairs that do not actually provide suggestions to the reviewer or address an issue with the review, but rather just praise and agree with their comment; the feedback should lead to the reviewer changing their comment. + +Here are some examples of comment-feedback pairs that should be entirely removed from the final feedback list: + +(a) Reviewer comment: The novelty remains concerned. It seems that the algorithm combines MLR + a bisimulation loss. + +Feedback to the reviewer: It would be helpful if you could elaborate on why you see the novelty as limited. In particular, it would be very helpful if you could discuss the key differences between simply combining MLR and bisimulation loss versus the approach proposed in this paper. This would give the authors a better chance to clarify the novelty of their work. + +(b) Reviewer comment: The paper lacks a proper related work section, which makes it challenging for readers to quickly grasp the background and understand the previous works. It is crucial to include a comprehensive discussion on related works, especially regarding the variance-reduced ZO hard-thresholding algorithm and the variance reduction aspect. + +Feedback to the reviewer: To make this comment more actionable, consider suggesting specific areas of related work that should be covered, such as key papers on ZO optimization, hard-thresholding methods, and variance reduction techniques in optimization. This will give the authors clearer guidance on what to include. + +(c) Reviewer comment: The paper is not very well-presented and is hard to follow. First of all, it is unclear in the hybrid setting considered, what are the relative relations of the guest parties? In the introduction, it appears that they share the same feature space but have different sample IDs, however, in 3.1 they appear to have different dimensions and unclear alignment. It is suggested that the paper properly define the problem setting. A figure on how data is partitioned by different parties would also help. + +Feedback to the reviewer: Your suggestion for a clearer definition of the problem setting and a visual representation of data partitioning is excellent. This would significantly improve the paper's clarity and readability. + +(d) Reviewer comment: 3) the model performance of the proposed methods still appear to be a little inferior to the centralized setting, not exactly "comparable" as claimed. It is important to understand whether the proposed method is "lossless" or "lossy" and why. I think more detailed examinations and explanations are needed here. + +Feedback to the reviewer: Your observation about the performance gap between the proposed method and the centralized setting is insightful. Requesting a more detailed analysis of whether the method is lossless or lossy, along with explanations for any performance differences, would significantly enhance the paper's contribution. + +(e) Reviewer comment: Q2: It appears that the introduced projection loss can be directly optimized with respect to the trigger $T$ . What's the rationale behind setting an upper bound and optimizing the projection loss through this bound? Does this approach offer computational benefits? + +Feedback to the reviewer: This question effectively probes the authors' methodological choices. It's a clear and concise query that could lead to valuable insights about the paper's approach. The authors' response could provide important context about the trade-offs involved in their method. + +4. 
Edit comments based on evaluations: + +- Do not add any new points unless the previous feedback obviously missed something important. +- If you do not identify any issues with a comment-feedback pair, do not edit it. + +5. The feedback will be shared with the reviewers for them to improve their comments. Address the reviewer in the second person (e.g., "you") and do not refer to them as "the reviewer." +6. Return the feedback list in the format you received it in, where the pairs are formatted as: + +- Comment: {{the verbatim comment of interest}} +- Feedback: {{your concise feedback}} + +# Remember: + +- You are a critic that will help reviewers improve their comments and reviews. +- Be concise, limiting your feedback for each reviewer comment to 1-2 sentences. +- Do not summarize your feedback at the end or include a preamble at the beginning. +- Do not repeat anything the reviewer already included in their review. +- Do not mention that you are using a checklist or guidelines. +- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer. + +# Formatter Prompt + +Here is the feedback for you to format: {feedback} + +# Formatter System Prompt + +You will be given a set of feedback given to various reviewer comments in a peer review of a machine learning paper. Your response, which will be the list of reviewer comments and feedback to them, will be shared with the reviewers who wrote the review, so that they can improve their reviews and the peer review cycle. + +Your task is to format the feedback into a structured format. You should format the feedback as a list of comment-feedback pairs: + +- Reviewer comment: {{a comment}} +- Feedback to the reviewer: {{feedback to the comment}} +- Reviewer comment: {{another comment}} +- Feedback to the reviewer: {{feedback to the comment}} + +... + +Your goal is to only keep feedback to the reviewers that can help them improve their comments. You should only pay attention to lines that start with "Comment" or "Feedback". + +- Only keep the comment-feedback pairs where the feedback can help improve the reviewer. If there is no suggestion for improvement, remove the entire comment-feedback pair. + +- Here is an example of a comment-feedback pair that should be removed from the final feedback list: + +* Reviewer comment: Section 2.2. "It independently formulates new approaches" → Is it a hallucination or a feature? It looks like a hallucination to me. If this is important for achieving good performance, can you provide an ablation study based on whether to allow new approaches or not? + +* Feedback to the reviewer: This is a thoughtful question about an important aspect of the methodology. Your suggestion for an ablation study is particularly valuable and could provide insights into the method's effectiveness. + +- If the feedback says "No changes needed" or something with a similar meaning, remove the entire comment-feedback pair. + +- Do not modify the content of the feedback at all, only format it into the bullet point format described above. +- The response you send will be immediately shared with the reviewers. Thus, there should be NO OTHER TEXT in the output, for example no preamble or conclusion sentences. Only respond with the list of feedback & reviewer comment bullets, and no other text. +- Since your response will immediately be sent to the reviewers, if there is no feedback, just say "Thanks for your hard work!". 
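
Putting the pieces above together, the stages could be chained roughly as in the sketch below. This is an illustration rather than the released implementation: `call_llm` and `run_reliability_tests` are hypothetical placeholders for the underlying LLM calls and for the guardrail tests listed in Appendix B, and the user prompts are abbreviated paraphrases of the templates given in this appendix.

```python
from concurrent.futures import ThreadPoolExecutor
from typing import Optional


def call_llm(system_prompt: str, user_prompt: str) -> str:
    """Hypothetical wrapper around a single LLM call; wire to your provider."""
    raise NotImplementedError


def run_reliability_tests(feedback: str, review: str) -> bool:
    """Hypothetical stand-in for the Appendix B guardrail tests."""
    raise NotImplementedError


def generate_review_feedback(
    paper: str,
    review: str,
    actor_system: str,
    aggregator_system: str,
    critic_system: str,
    formatter_system: str,
) -> Optional[str]:
    """Sketch of the Actor -> Aggregator -> Critic -> Formatter chain."""
    # User prompts below paraphrase the templates given in this appendix.
    actor_user = f"Here is the paper: {paper}.\n\nHere is the peer review: {review}."

    # 1) Two Actors evaluate the same review in parallel.
    with ThreadPoolExecutor(max_workers=2) as pool:
        feedback_lists = list(pool.map(lambda _: call_llm(actor_system, actor_user), range(2)))

    # 2) The Aggregator merges the two feedback lists into one.
    aggregated = call_llm(
        aggregator_system,
        f"Here is the paper: {paper}.\n\nHere are the lists of feedback: {feedback_lists}.\n\n"
        f"Here is the peer review: {review}.",
    )

    # 3) The Critic edits the aggregated feedback for correctness and clarity.
    criticized = call_llm(
        critic_system,
        f"Here is the paper: {paper}.\n\nHere is the feedback: {aggregated}.\n\n"
        f"Here is the peer review: {review}.",
    )

    # 4) The Formatter produces the final comment-feedback bullet list.
    formatted = call_llm(formatter_system, f"Here is the feedback for you to format: {criticized}")

    # 5) Post the feedback only if every reliability test passes.
    return formatted if run_reliability_tests(formatted, review) else None
```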
+ +We also provide the prompt used for the incorporation analysis: + +# Incorporation Analysis Prompt + +Task: Determine if the following feedback suggestion was incorporated into the modified version of a review. Also, categorize the given feedback into exactly one of these three categories: + +1. ACTIONABLE_VAGUE: Encouraging reviewers to rephrase vague review comments, making them more actionable for the authors. For example, the feedback says: "It would be helpful to suggest specific baselines that you think must be included. Are there particular methods you feel are missing from the current comparison? Could you elaborate why?" +2. CONTENT_CLARIFY: Highlighting sections of the paper that may already address some of the reviewer's questions (clarifying content). For example, the feedback says: "Does Figure 5 of the paper answer your question? In particular: 'In Transformers, the proposed technique provides $25\%$ relative improvement in wall-clock time (Figure 5)'." +3. ADDRESS_UNPROFESSIONAL: Identifying and addressing unprofessional or inappropriate remarks in the review. For example, the feedback says: "We appreciate your review, but kindly request that you focus your comments on the specific content and methodology of the paper rather than making personal remarks about the authors." + +# Instructions: + +1. Read the original review and modified review. +2. Read the reviewer's original comment and the feedback given to the reviewer. +3. Determine if the changes suggested in the feedback were incorporated into the modified review as compared to the original review. If the reviewer's original comment appears verbatim in the modified review still, you should return FALSE for the incorporation. The incorporations should be clear and quite explicit. Think critically about if the incorporation is significant enough to count. +4. Determine which of the three categories best describes the primary purpose of the feedback. +5. Think step by step and explain your reasoning. + +Output Format: Please provide your final answer as two comma-separated values between tags, where: + +- The first boolean is TRUE or FALSE depending on whether the feedback was incorporated. +- The second string is one of these three options: ACTIONABLE_VAGUE, CONTENT_CLARIFY, or ADDRESS_UNPROFESSIONAL. + +Example: TRUE, ACTIONABLE_VAGUE + +# B Reliability tests + +We generated the following reliability tests to be run in real-time after feedback was generated. For each reliability test, we provide examples of feedback that would fail it: + +1. Praising the reviewer: make sure the feedback does not simply praise what the reviewer wrote without providing critical suggestions to improve their comment. + +Example feedback: "This is a good question that challenges a key assumption of the paper." + +2. Addressing feedback to the author: certify that the feedback is addressed to the reviewer with suggestions to make their review better, rather than addressed to the author of the paper with suggestions on how they can improve their paper. + +Example feedback: "To strengthen your paper, consider discussing the relationship between FrugalGPT and traditional ensembling techniques. Highlight both similarities and differences and explain how this relates to the observed quality improvements. This would provide more context for your results and situate your work within the broader field of machine learning." + +3. 
Restate what the reviewer wrote: does the feedback simply restate what the review comment says without providing any new meaningful and unique suggestions? + +Example reviewer comment: Can examples or further clarification be given for the 3.1 sentence "enhancing the accountability of the output"? This isn't clear, at least to me. + +Example feedback: This is a good point that could lead to improved clarity in the paper. To make your comment more actionable, you could ask the authors to provide examples or further clarification for the sentence "enhancing the accountability of the output". + +4. Format is correct: ensure that all feedback pairs are in the correct format, protecting against any errors in the pipeline that could have led to malformed feedback. + +# C Average score changes during review and rebuttal periods + +In Figure S1A, we examined the potential change in review scores (soundness, presentation, contribution, rating, and confidence) between the initial and modified reviews across the groups during the review period. We found that reviewers who were selected to receive feedback did not change their scores more than those in the control group (top panel). We also saw that of reviewers who received feedback, reviewers who updated their review were significantly more likely to decrease their soundness score and increase their confidence score at the end of the review period (before the rebuttal period began) compared to those who did not update their review. This suggests that reviewers who updated their reviews became more confident in their assessments. + +In Figure S1B, we conducted the same analysis during the rebuttal period. Similar to the review period, we found that reviewers who were selected to receive feedback did not change their scores more than those in the control group (top panel). Of reviewers who received feedback, those who updated their reviews significantly increased all scores except confidence compared to those who did not update their reviews. From this, we see that reviewers who updated their reviews were much more engaged in the rebuttal process. + +# D Incorporation model validation + +To test our incorporation model, we hand-labeled a test set of 222 feedback items (from 63 randomly chosen reviews that had been updated) as being incorporated into the updated review or not. We labeled 132 of those items as incorporated (59.5%) and 90 as not (40.5%). We then ran those 222 feedback items through the LLM pipeline and received a 92% accuracy rate, with a false negative rate of 0.9% and a false positive rate of 5.9% (see Supplementary Figure S2). Of the false positives, 8/13 were instances of human error where the labeler missed that the item was incorporated into the review, and the model accurately identified this incorporation. The remaining 5 false positives were due to subjectivity - the model reasoned that the reviewer partially incorporated the sentiments of the feedback, whereas the labeler did not view that as sufficient enough to count as incorporated. The two false negatives represent data points the labeler initially mislabeled and the model correctly labeled. This effectively gives us a false negative rate of 0% and a false positive rate of 2.25%, allowing us to be confident that our incorporation pipeline was highly accurate. + +![](images/9dee71417f024a75311dcca739b498277ce5db9e87bd392a85b26aa9f7e2dc82.jpg) +A +Supplementary Figure S1: (A) Review period score changes. 
(Top) There is no significant difference in updating scores (measured between initial and pre-rebuttal reviews) between the feedback and control groups. (Bottom) Among reviewers who received feedback, those who updated their reviews were more likely to decrease soundness scores $(\mathrm{p} \leq 0.05)$ and increase confidence scores $(\mathrm{p} \leq 0.05)$ compared to those who did not update their reviews. (B) Rebuttal period score changes. (Top) There is no significant difference in updating scores (measured between pre- and post-rebuttal reviews) between the feedback and control groups. (Bottom) Among feedback recipients, reviewers who updated their reviews demonstrated significantly larger score increases across all metrics (soundness: $^{**}\mathrm{p} \leq 0.01$; presentation: $^{***}\mathrm{p} \leq 0.001$; contribution: $^{\ast}\mathrm{p} \leq 0.05$; rating: $^{***}\mathrm{p} \leq 0.001$) except confidence, compared to non-updaters.

![](images/5d0e819082fc1eb3e1244c31df2483dcf42bb1a4e7d886a57d2f157e222f01be.jpg)
Supplementary Figure S1, panel B.

![](images/c919e5fe32eb32c2e054bbd43c926c9021b22fba46f76a309bc76e44d87bdac0.jpg)
![](images/1e9dd95be4cbe45e120ff063aafa70f494240cc5380b6e024acba14c412c4f76.jpg)
![](images/b6314c2a0c17b97ed691d22b6db7c48ab77ca5a01e5acc6e666823c22a6a99e5.jpg)
![](images/d1d3ea6c5c822412e605337fe0c301513ef3ce178abed41a323ab4f830aec80e.jpg)
![](images/db0d37deb2bcaec229f5f0d8eebc6e9fa349e0043920244e80891c021de2bd95.jpg)
Supplementary Figure S2 graphics, panels A and B; the panel A flowchart elements are: Original review, Modified review, Feedback item, Feedback incorporated (Y/N).

Supplementary Figure S2: (A) Incorporation model pipeline. Given the original review text, modified review text, and individual feedback item, the LLM determined if the feedback was incorporated into the modified review or not. (B) Model accuracy. Our incorporation model successfully labeled $92\%$ of the test feedback items, where human annotators determined the ground truth labeling. Of the false positives, the majority were instances of human error where the model accurately identified the missed incorporation. All of the false negatives were instances of human error that the model caught. 
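
For reference, the raw and adjusted error rates quoted in this appendix follow directly from the confusion counts on the 222-item test set (13 false positives and 2 false negatives before re-adjudication, 5 remaining disagreements after). A minimal sketch of that arithmetic; the helper name is ours, and rates are taken over all test items, as in the text above.

```python
def error_rates(n_items: int, false_positives: int, false_negatives: int) -> tuple[float, float]:
    """Return (false positive rate, false negative rate) over all test items."""
    return false_positives / n_items, false_negatives / n_items


# Counts reported above: 222 hand-labeled feedback items, 13 FP, 2 FN.
fp_rate, fn_rate = error_rates(222, 13, 2)
print(f"raw: FPR = {fp_rate:.1%}, FNR = {fn_rate:.1%}")               # raw: FPR = 5.9%, FNR = 0.9%

# After re-adjudicating the labeling errors described above, 5 false positives
# and no false negatives remain.
adj_fp_rate, adj_fn_rate = error_rates(222, 5, 0)
print(f"adjusted: FPR = {adj_fp_rate:.2%}, FNR = {adj_fn_rate:.0%}")  # adjusted: FPR = 2.25%, FNR = 0%
```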
\ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09737/images/1e9dd95be4cbe45e120ff063aafa70f494240cc5380b6e024acba14c412c4f76.jpg b/data/2025/2504_09xxx/2504.09737/images/1e9dd95be4cbe45e120ff063aafa70f494240cc5380b6e024acba14c412c4f76.jpg new file mode 100644 index 0000000000000000000000000000000000000000..fffd65f47340109264d54c209cb2e4bfb281ca0d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/1e9dd95be4cbe45e120ff063aafa70f494240cc5380b6e024acba14c412c4f76.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:df27e034ee91531f433dc56733102daa5309451093ed452f70c3bab9aeed4f44 +size 1485 diff --git a/data/2025/2504_09xxx/2504.09737/images/39d45c4bf6501e803d5caa9078adf127fb44c19ce3e84bc278beda887ea50cc2.jpg b/data/2025/2504_09xxx/2504.09737/images/39d45c4bf6501e803d5caa9078adf127fb44c19ce3e84bc278beda887ea50cc2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..43dd1e63606f1e3cbb02ce664c402de199833ba8 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/39d45c4bf6501e803d5caa9078adf127fb44c19ce3e84bc278beda887ea50cc2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:752123da65a97f6ad623511ed3bca67bf785d9daef80e2b596fa7f828343e327 +size 18593 diff --git a/data/2025/2504_09xxx/2504.09737/images/4dc4fc56dbe55e0e8da3ad87e006978c1a53d8a96d528471693d75f4bb8980bb.jpg b/data/2025/2504_09xxx/2504.09737/images/4dc4fc56dbe55e0e8da3ad87e006978c1a53d8a96d528471693d75f4bb8980bb.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bb230740ced032b4bb4dc4d87192a5f705b25f5a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/4dc4fc56dbe55e0e8da3ad87e006978c1a53d8a96d528471693d75f4bb8980bb.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6deeb0ba80f935a349436e6b7fbe0692637ebca5ad65092bbf52a27a6a214164 +size 17480 diff --git a/data/2025/2504_09xxx/2504.09737/images/53730c84253867313b52b006f4df0105e6064c8df741a4e4dceee4f0f040a1ce.jpg b/data/2025/2504_09xxx/2504.09737/images/53730c84253867313b52b006f4df0105e6064c8df741a4e4dceee4f0f040a1ce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bf06fea465eb6332c1dd7532fd5a4b815fc29f48 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/53730c84253867313b52b006f4df0105e6064c8df741a4e4dceee4f0f040a1ce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f4ed8c123730f27e758b0761b5425d1a5dee5ebd43dad6a6836d2809a228e811 +size 83964 diff --git a/data/2025/2504_09xxx/2504.09737/images/574ace002b44edf3e53c3c7f3c729f721aaf7f0ee5bad955cd6216180e7b5ca4.jpg b/data/2025/2504_09xxx/2504.09737/images/574ace002b44edf3e53c3c7f3c729f721aaf7f0ee5bad955cd6216180e7b5ca4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4d7b5a189d3ef7f502fe63cf3f04b77ec06ae3cd --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/574ace002b44edf3e53c3c7f3c729f721aaf7f0ee5bad955cd6216180e7b5ca4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7fd60c8705b2aab6dd4418188af8f5957699c9122e353914422cb07475e039c9 +size 23790 diff --git a/data/2025/2504_09xxx/2504.09737/images/5d0e819082fc1eb3e1244c31df2483dcf42bb1a4e7d886a57d2f157e222f01be.jpg b/data/2025/2504_09xxx/2504.09737/images/5d0e819082fc1eb3e1244c31df2483dcf42bb1a4e7d886a57d2f157e222f01be.jpg new file mode 100644 index 0000000000000000000000000000000000000000..09b0f08db997f921bbb24a4ac89b1cded1b99f09 --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09737/images/5d0e819082fc1eb3e1244c31df2483dcf42bb1a4e7d886a57d2f157e222f01be.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:552d1f376adf26cd9530b8d9b0710c162d212481cfad21a2e498e1c5e111ef08 +size 42887 diff --git a/data/2025/2504_09xxx/2504.09737/images/64415fd85d0b933fa308e605b768e68b5bb8236d64a7550c074a78efa794cf12.jpg b/data/2025/2504_09xxx/2504.09737/images/64415fd85d0b933fa308e605b768e68b5bb8236d64a7550c074a78efa794cf12.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2d6d3df26bac799f00f3d24437390567c497f090 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/64415fd85d0b933fa308e605b768e68b5bb8236d64a7550c074a78efa794cf12.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:abb02524809ce07ea0ad1b1221be12e950afeaeea86e3ab794b3f9adf32d9f27 +size 22573 diff --git a/data/2025/2504_09xxx/2504.09737/images/7c456f8feec7c678459b5c72845568ab8bcd9659db979be35d064f3fe484cfb5.jpg b/data/2025/2504_09xxx/2504.09737/images/7c456f8feec7c678459b5c72845568ab8bcd9659db979be35d064f3fe484cfb5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..0f13254c7fec95c32abe9214d3f7e7d3db863aeb --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/7c456f8feec7c678459b5c72845568ab8bcd9659db979be35d064f3fe484cfb5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e66bf2158326726991750112ff527cc91ab5d2aaee840e4d653eddbf77eec07d +size 9289 diff --git a/data/2025/2504_09xxx/2504.09737/images/92b4aa5a4616a8525289426598d6d2f1ec4b3b5673fe1fdf950f45f5a7d0227e.jpg b/data/2025/2504_09xxx/2504.09737/images/92b4aa5a4616a8525289426598d6d2f1ec4b3b5673fe1fdf950f45f5a7d0227e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8b0db1602b493ef14e2d6a71d2671db540bfd889 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/92b4aa5a4616a8525289426598d6d2f1ec4b3b5673fe1fdf950f45f5a7d0227e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1fa0cfc484d71eafaf4d8b6a47aea739dd72bac8ac703987613aa65aaf6019de +size 121395 diff --git a/data/2025/2504_09xxx/2504.09737/images/9dee71417f024a75311dcca739b498277ce5db9e87bd392a85b26aa9f7e2dc82.jpg b/data/2025/2504_09xxx/2504.09737/images/9dee71417f024a75311dcca739b498277ce5db9e87bd392a85b26aa9f7e2dc82.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5965ed5135292f187e9373b1c2a66ba34675e6e1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/9dee71417f024a75311dcca739b498277ce5db9e87bd392a85b26aa9f7e2dc82.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1ccd35e4378a0993d40a255df2c6a40b81877b7e01f3d6134d878397b6eb4f49 +size 46668 diff --git a/data/2025/2504_09xxx/2504.09737/images/af1bd13c6bc85f999f39c1860d84dfd23f079d50200f0c7318b39fdd1a4c968b.jpg b/data/2025/2504_09xxx/2504.09737/images/af1bd13c6bc85f999f39c1860d84dfd23f079d50200f0c7318b39fdd1a4c968b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2f831ecd0aed8a4c68bfb755113bde95a33b9069 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/af1bd13c6bc85f999f39c1860d84dfd23f079d50200f0c7318b39fdd1a4c968b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6e90d8425e2521414a6824cd6d0519e16c220aa4163e83eeef67f98c4a81c908 +size 34452 diff --git a/data/2025/2504_09xxx/2504.09737/images/b6314c2a0c17b97ed691d22b6db7c48ab77ca5a01e5acc6e666823c22a6a99e5.jpg 
b/data/2025/2504_09xxx/2504.09737/images/b6314c2a0c17b97ed691d22b6db7c48ab77ca5a01e5acc6e666823c22a6a99e5.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a560cd449ee813a285dc4f8c62a71a508bbacc12 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/b6314c2a0c17b97ed691d22b6db7c48ab77ca5a01e5acc6e666823c22a6a99e5.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15ee00c778342dc13b4b36e607a2fa66734e2df8dd0072a7c06a22592d1fb760 +size 1469 diff --git a/data/2025/2504_09xxx/2504.09737/images/b87b6b6a84e207829cfe46a705418008d45b8586d798978d7b7854b20f519099.jpg b/data/2025/2504_09xxx/2504.09737/images/b87b6b6a84e207829cfe46a705418008d45b8586d798978d7b7854b20f519099.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f832117d856e44c9a1a018eaac18c8d3122fa9c0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/b87b6b6a84e207829cfe46a705418008d45b8586d798978d7b7854b20f519099.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:af0210d1e43b0d3ca505f5ed270393353b40b401a1e854ef750d1b9a19def6af +size 321794 diff --git a/data/2025/2504_09xxx/2504.09737/images/c390e294f465635e741d8f40fc61356f4edd26a8fd74e07132339f3a99ecf6e9.jpg b/data/2025/2504_09xxx/2504.09737/images/c390e294f465635e741d8f40fc61356f4edd26a8fd74e07132339f3a99ecf6e9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..beec19dab0fde0bb1a0cb77f758c4d1474e33c01 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/c390e294f465635e741d8f40fc61356f4edd26a8fd74e07132339f3a99ecf6e9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b9e2c2bc8dee1039f085623b4c0fe9e334d6bd1223862927a2085798a06a6492 +size 29670 diff --git a/data/2025/2504_09xxx/2504.09737/images/c919e5fe32eb32c2e054bbd43c926c9021b22fba46f76a309bc76e44d87bdac0.jpg b/data/2025/2504_09xxx/2504.09737/images/c919e5fe32eb32c2e054bbd43c926c9021b22fba46f76a309bc76e44d87bdac0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f5fd92c00198d190d889c575bfb733bccb1c74a0 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/c919e5fe32eb32c2e054bbd43c926c9021b22fba46f76a309bc76e44d87bdac0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc74d4d109cd83643b39fa11d57c82e0cdbd55e1f3ac7fc4a5a7804ddb0347de +size 1434 diff --git a/data/2025/2504_09xxx/2504.09737/images/cb3cb6790063b0f0ae0f127965da667e45b0ac6b2a5535a4245588d35aa6d9d9.jpg b/data/2025/2504_09xxx/2504.09737/images/cb3cb6790063b0f0ae0f127965da667e45b0ac6b2a5535a4245588d35aa6d9d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a8ecd7226ac9481f011f3986a87e7c731ec33d27 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/cb3cb6790063b0f0ae0f127965da667e45b0ac6b2a5535a4245588d35aa6d9d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:6274c10d41b7540279d47b02168c8118e4f31f9557846faac292bf6cf9323c4b +size 30375 diff --git a/data/2025/2504_09xxx/2504.09737/images/d1d3ea6c5c822412e605337fe0c301513ef3ce178abed41a323ab4f830aec80e.jpg b/data/2025/2504_09xxx/2504.09737/images/d1d3ea6c5c822412e605337fe0c301513ef3ce178abed41a323ab4f830aec80e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c167dcf638e14d2c41fc9e77c53780d196d7cb9f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/d1d3ea6c5c822412e605337fe0c301513ef3ce178abed41a323ab4f830aec80e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:b32da2d5ee550ac9f5cc79a35d1ac4445479d0d411069fbe2e34bdbbacd22fa3 +size 3140 diff --git a/data/2025/2504_09xxx/2504.09737/images/db0d37deb2bcaec229f5f0d8eebc6e9fa349e0043920244e80891c021de2bd95.jpg b/data/2025/2504_09xxx/2504.09737/images/db0d37deb2bcaec229f5f0d8eebc6e9fa349e0043920244e80891c021de2bd95.jpg new file mode 100644 index 0000000000000000000000000000000000000000..652f22c4673dbf4d0dd0ec251a2b46a56b33db94 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/db0d37deb2bcaec229f5f0d8eebc6e9fa349e0043920244e80891c021de2bd95.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:73905baad1b9fce35e9a8b522552f43420cad72da7bbab0fa72be7b35ac41cb1 +size 17609 diff --git a/data/2025/2504_09xxx/2504.09737/images/e626483d5b51e3bf27cb3db70cce10e0b7472c4c1220b887075883a755b16eb6.jpg b/data/2025/2504_09xxx/2504.09737/images/e626483d5b51e3bf27cb3db70cce10e0b7472c4c1220b887075883a755b16eb6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..92268a165ecaadbf8f6799964a42a125d9e940be --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/e626483d5b51e3bf27cb3db70cce10e0b7472c4c1220b887075883a755b16eb6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51087da40e8a7ad9d47b220260ee601b73a30d3c52beda596b6c1547ab6b2cb9 +size 45172 diff --git a/data/2025/2504_09xxx/2504.09737/images/e9b2135417f8a4a3154218044a289992001ca73745da1559da65cd735992daca.jpg b/data/2025/2504_09xxx/2504.09737/images/e9b2135417f8a4a3154218044a289992001ca73745da1559da65cd735992daca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..42e19ea05bf6d77cbfa710d3b07979234c9b2d3d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/e9b2135417f8a4a3154218044a289992001ca73745da1559da65cd735992daca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c62cb6bba86fac1ebd9e01d9a4e49e1c2cd9cafcbee78f901a845b2d28827393 +size 30810 diff --git a/data/2025/2504_09xxx/2504.09737/images/fe3c200bfa72cd0d795f0d8bd31f374bae1fefefefb74b32eebe2adb5d84a559.jpg b/data/2025/2504_09xxx/2504.09737/images/fe3c200bfa72cd0d795f0d8bd31f374bae1fefefefb74b32eebe2adb5d84a559.jpg new file mode 100644 index 0000000000000000000000000000000000000000..79d7b733d52fa6422002f6609624761da3d0511f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/images/fe3c200bfa72cd0d795f0d8bd31f374bae1fefefefb74b32eebe2adb5d84a559.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3ef9ed3c623a7c19393ee9c892b4e4ed7344ab2ffa4a3b25575735ad64574fa +size 167703 diff --git a/data/2025/2504_09xxx/2504.09737/layout.json b/data/2025/2504_09xxx/2504.09737/layout.json new file mode 100644 index 0000000000000000000000000000000000000000..c00df469a7532da3e235eb286440ca555a475c8c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09737/layout.json @@ -0,0 +1,17598 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 70, + 107, + 541, + 148 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 107, + 541, + 148 + ], + "spans": [ + { + "bbox": [ + 70, + 107, + 541, + 148 + ], + "type": "text", + "content": "Can LLM feedback enhance review quality? 
A randomized study of 20K reviews at ICLR 2025" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 129, + 162, + 479, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 129, + 162, + 479, + 191 + ], + "spans": [ + { + "bbox": [ + 129, + 162, + 479, + 191 + ], + "type": "text", + "content": "Nitya Thakkar1, Mert Yuksekgonul1, Jake Silberg1, Animesh Garg2, Nanyun Peng3, Fei Sha4, Rose Yu5, Carl Vondrick6, James Zou1" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 260, + 192, + 351, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 260, + 192, + 351, + 205 + ], + "spans": [ + { + "bbox": [ + 260, + 192, + 351, + 205 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 260, + 192, + 351, + 205 + ], + "type": "text", + "content": "Stanford University" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 233, + 206, + 377, + 219 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 206, + 377, + 219 + ], + "spans": [ + { + "bbox": [ + 233, + 206, + 377, + 219 + ], + "type": "text", + "content": "2Georgia Institute of Technology" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 222, + 220, + 388, + 232 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 222, + 220, + 388, + 232 + ], + "spans": [ + { + "bbox": [ + 222, + 220, + 388, + 232 + ], + "type": "text", + "content": "3University of California, Los Angeles" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 266, + 234, + 344, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 266, + 234, + 344, + 246 + ], + "spans": [ + { + "bbox": [ + 266, + 234, + 344, + 246 + ], + "type": "text", + "content": "4Google Research" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 225, + 247, + 384, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 225, + 247, + 384, + 260 + ], + "spans": [ + { + "bbox": [ + 225, + 247, + 384, + 260 + ], + "type": "text", + "content": "5University of California, San Diego" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 257, + 262, + 353, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 257, + 262, + 353, + 274 + ], + "spans": [ + { + "bbox": [ + 257, + 262, + 353, + 274 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 257, + 262, + 353, + 274 + ], + "type": "text", + "content": "Columbia University" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 283, + 299, + 326, + 310 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 283, + 299, + 326, + 310 + ], + "spans": [ + { + "bbox": [ + 283, + 299, + 326, + 310 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 93, + 314, + 517, + 513 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 314, + 517, + 513 + ], + "spans": [ + { + "bbox": [ + 93, + 314, + 517, + 513 + ], + "type": "text", + "content": "Peer review at AI conferences is stressed by rapidly rising submission volumes, leading to deteriorating review quality and increased author dissatisfaction. To address these issues, we developed Review Feedback Agent, a system leveraging multiple large language models (LLMs) to improve review clarity and actionability by providing automated feedback on vague comments, content misunderstandings, and unprofessional remarks to reviewers. 
Implemented at ICLR 2025 as a large randomized control study, our system provided optional feedback to more than 20,000 randomly selected reviews. To ensure high-quality feedback for reviewers at this scale, we also developed a suite of automated reliability tests powered by LLMs that acted as guardrails to ensure feedback quality, with feedback only being sent to reviewers if it passed all the tests. The results show that " + }, + { + "bbox": [ + 93, + 314, + 517, + 513 + ], + "type": "inline_equation", + "content": "27\\%" + }, + { + "bbox": [ + 93, + 314, + 517, + 513 + ], + "type": "text", + "content": " of reviewers who received feedback updated their reviews, and over 12,000 feedback suggestions from the agent were incorporated by those reviewers. This suggests that many reviewers found the AI-generated feedback sufficiently helpful to merit updating their reviews. Incorporating AI feedback led to significantly longer reviews (an average increase of 80 words among those who updated after receiving feedback) and more informative reviews, as evaluated by blinded researchers. Moreover, reviewers who were selected to receive AI feedback were also more engaged during paper rebuttals, as seen in longer author-reviewer discussions. This work demonstrates that carefully designed LLM-generated review feedback can enhance peer review quality by making reviews more specific and actionable while increasing engagement between reviewers and authors. The Review Feedback Agent is publicly available at https://github.com/zou-group/review_feedback_agent." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 69, + 530, + 186, + 544 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 530, + 186, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 530, + 186, + 544 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "text", + "content": "Scientific peer review is a critical step before publication, where domain experts evaluate the research to ensure thoroughness and scientific integrity, prevent false claims, and provide a strong foundation for future work [1, 2]. High-quality reviews are essential for authors to improve their work, address key limitations, and advance scientific progress. However, in a survey of 11,800 researchers worldwide, while " + }, + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "inline_equation", + "content": "98\\%" + }, + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "text", + "content": " view peer review as essential to maintaining the quality and integrity of academic communication, only " + }, + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "inline_equation", + "content": "55.4\\%" + }, + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "text", + "content": " expressed satisfaction with the quality of reviews they receive [3]. This dissatisfaction has grown as obtaining constructive and high-quality peer reviews has become more challenging due to the increase in the number of paper submissions, especially in fast-moving areas like Artificial Intelligence (AI) [4, 5]. 
For example, the International Conference on Learning Representations (ICLR) experienced year-over-year submission increases of " + }, + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "inline_equation", + "content": "47\\%" + }, + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "text", + "content": " in 2024 and " + }, + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "inline_equation", + "content": "61\\%" + }, + { + "bbox": [ + 67, + 555, + 541, + 697 + ], + "type": "text", + "content": " in 2025 [6]. To maintain a rigorous and meaningful peer review process amid this growth, it is crucial to address the growing burden on reviewers and the subsequent deterioration in review quality." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 698, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 698, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 68, + 698, + 541, + 723 + ], + "type": "text", + "content": "Authors at AI conferences increasingly report receiving short, vague reviews with criticisms like 'not novel' or 'not state-of-the-art (SOTA)' [7]. At the 2023 Association for Computational Linguistics meeting," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "spans": [ + { + "bbox": [ + 14, + 214, + 37, + 559 + ], + "type": "text", + "content": "arXiv:2504.09737v1 [cs.AI] 13 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "1" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 167 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 167 + ], + "type": "text", + "content": "authors flagged " + }, + { + "bbox": [ + 67, + 72, + 541, + 167 + ], + "type": "inline_equation", + "content": "12.9\\%" + }, + { + "bbox": [ + 67, + 72, + 541, + 167 + ], + "type": "text", + "content": " of reviews for poor quality, primarily due to these vague, surface-level criticisms [8]. The peer review system is further strained by reviewers being assigned papers outside their expertise [9] and the same papers being reviewed multiple times due to high rejection rates [1]. Additionally, the 2014 NeurIPS Experiment highlighted inconsistencies in the peer review process by showing that approximately " + }, + { + "bbox": [ + 67, + 72, + 541, + 167 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 67, + 72, + 541, + 167 + ], + "type": "text", + "content": " of paper acceptance decisions differed between two independent review committees [10]. These issues not only frustrate authors but potentially allow weaker research to be accepted while strong work is rejected, ultimately preventing papers from reaching their full potential due to the decline of meaningful dialogue between reviewers and authors." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 167, + 541, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 167, + 541, + 228 + ], + "spans": [ + { + "bbox": [ + 67, + 167, + 541, + 228 + ], + "type": "text", + "content": "Large language models (LLMs) [11] have the potential to enhance the quality and usefulness of peer reviews for authors [12]. Recent studies demonstrated that LLMs can serve as effective critics, generating detailed and constructive feedback [13, 14]. Furthermore, LLMs have already shown high utilization in the peer review process. Reviewers are increasingly turning to LLMs to assist in drafting their reviews, with an estimated " + }, + { + "bbox": [ + 67, + 167, + 541, + 228 + ], + "type": "inline_equation", + "content": "10.6\\%" + }, + { + "bbox": [ + 67, + 167, + 541, + 228 + ], + "type": "text", + "content": " of reviewers at ICLR 2024 using LLMs for this purpose [15, 16]." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 228, + 541, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 228, + 541, + 346 + ], + "spans": [ + { + "bbox": [ + 67, + 228, + 541, + 346 + ], + "type": "text", + "content": "To explore how LLMs can improve review quality at scale, we introduce Review Feedback Agent, a multi-LLM system designed to enhance the clarity and actionability of reviews by providing feedback to reviewers. Piloted at ICLR 2025 as a large randomized control study, our agent provided feedback to over 20,000 randomly selected reviews (representing half of all ICLR 2025 reviews) over four weeks from October 15 to November 12, 2024. The generated feedback primarily focused on minimizing instances of vague and unjustified comments while also addressing content misinterpretations and unprofessional remarks. Using Claude Sonnet 3.5 as the backbone [11], we created a system of five LLMs that collaborated to generate high-quality feedback. To enhance the system's reliability against potential errors or failures in instruction-following [17, 18], we developed a set of reliability tests to evaluate specific qualities of the generated feedback; the feedback was only posted if it passed all of these tests." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 347, + 541, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 347, + 541, + 442 + ], + "spans": [ + { + "bbox": [ + 67, + 347, + 541, + 442 + ], + "type": "text", + "content": "Summary of main findings. Of the randomly selected ICLR reviews that received AI feedback, " + }, + { + "bbox": [ + 67, + 347, + 541, + 442 + ], + "type": "inline_equation", + "content": "26.6\\%" + }, + { + "bbox": [ + 67, + 347, + 541, + 442 + ], + "type": "text", + "content": " of reviewers updated their reviews, altogether incorporating 12,222 suggestions from the feedback agent into the reviews. Blinded ML researchers labeled these revised reviews as more informative and clearer than their initial versions. Reviewers who updated after receiving feedback increased the length of reviews by an average of 80 words. Furthermore, AI feedback led to more engaged discussions during the rebuttal period, as seen through longer author and reviewer responses. We also observed that reviewers who received feedback were more likely to change their scores after the rebuttal period, which was consistent with a more engaged rebuttal process." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 443, + 541, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 443, + 541, + 479 + ], + "spans": [ + { + "bbox": [ + 67, + 443, + 541, + 479 + ], + "type": "text", + "content": "In this study, we present the first large-scale deployment for using LLMs to assist peer review. By making reviews more actionable and informative, we aim to enhance the peer review experience and promote a more constructive scientific process." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 497, + 160, + 511 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 497, + 160, + 511 + ], + "spans": [ + { + "bbox": [ + 69, + 497, + 160, + 511 + ], + "type": "text", + "content": "2 Methods" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 521, + 541, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 521, + 541, + 570 + ], + "spans": [ + { + "bbox": [ + 67, + 521, + 541, + 570 + ], + "type": "text", + "content": "In what follows, we first describe the review feedback experiment, including its goals and our technical setup with OpenReview. Next, we outline the architecture of our Review Feedback Agent and explain how the system was designed to meet our goals while ensuring a high level of reliability. In total, the agent automatically provided feedback to over 20,000 reviews at ICLR 2025." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 584, + 336, + 599 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 584, + 336, + 599 + ], + "spans": [ + { + "bbox": [ + 67, + 584, + 336, + 599 + ], + "type": "text", + "content": "2.1 ICLR 2025 review feedback experiment" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 604, + 541, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 604, + 541, + 675 + ], + "spans": [ + { + "bbox": [ + 67, + 604, + 541, + 675 + ], + "type": "text", + "content": "Our pilot study was conducted in collaboration with ICLR 2025 and OpenReview. As one of the world's fastest-growing AI conferences, ICLR receives thousands of paper submissions yearly; in 2025, ICLR received 11,603 submissions. Each submission is assigned an average of 4 reviewers, and all reviews are standardized to include the same sections: summary, strengths, weaknesses, and questions. Furthermore, reviewers provide scores on a scale of 1 (low) to 10 (high), rating the paper according to the following categories: soundness, presentation, contribution, rating, and confidence." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 675, + 541, + 712 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 675, + 541, + 712 + ], + "spans": [ + { + "bbox": [ + 67, + 675, + 541, + 712 + ], + "type": "text", + "content": "Goal: Our goal was to enhance review quality and, in particular, reduce low-information content reviews. Toward this goal, we identified three categories of common issues in reviews that we hoped to improve by providing LLM-generated feedback. 
The common issues are: 1) vague or generic critiques in reviews (the" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "2" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 103, + 272, + 285 + ], + "blocks": [ + { + "bbox": [ + 74, + 87, + 82, + 95 + ], + "lines": [ + { + "bbox": [ + 74, + 87, + 82, + 95 + ], + "spans": [ + { + "bbox": [ + 74, + 87, + 82, + 95 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 78, + 103, + 272, + 285 + ], + "lines": [ + { + "bbox": [ + 78, + 103, + 272, + 285 + ], + "spans": [ + { + "bbox": [ + 78, + 103, + 272, + 285 + ], + "type": "image", + "image_path": "cb3cb6790063b0f0ae0f127965da667e45b0ac6b2a5535a4245588d35aa6d9d9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 286, + 95, + 533, + 287 + ], + "blocks": [ + { + "bbox": [ + 282, + 88, + 288, + 95 + ], + "lines": [ + { + "bbox": [ + 282, + 88, + 288, + 95 + ], + "spans": [ + { + "bbox": [ + 282, + 88, + 288, + 95 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 286, + 95, + 533, + 287 + ], + "lines": [ + { + "bbox": [ + 286, + 95, + 533, + 287 + ], + "spans": [ + { + "bbox": [ + 286, + 95, + 533, + 287 + ], + "type": "image", + "image_path": "53730c84253867313b52b006f4df0105e6064c8df741a4e4dceee4f0f040a1ce.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 85, + 299, + 536, + 531 + ], + "blocks": [ + { + "bbox": [ + 74, + 292, + 82, + 298 + ], + "lines": [ + { + "bbox": [ + 74, + 292, + 82, + 298 + ], + "spans": [ + { + "bbox": [ + 74, + 292, + 82, + 298 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 85, + 299, + 536, + 531 + ], + "lines": [ + { + "bbox": [ + 85, + 299, + 536, + 531 + ], + "spans": [ + { + "bbox": [ + 85, + 299, + 536, + 531 + ], + "type": "image", + "image_path": "92b4aa5a4616a8525289426598d6d2f1ec4b3b5673fe1fdf950f45f5a7d0227e.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 563, + 541, + 709 + ], + "lines": [ + { + "bbox": [ + 67, + 563, + 541, + 709 + ], + "spans": [ + { + "bbox": [ + 67, + 563, + 541, + 709 + ], + "type": "text", + "content": "Figure 1: (A) Randomized controlled study setup. Before the start of the review period, we randomly assigned all submissions to one of three groups to determine how many of its reviews received feedback: none, half, or all. When a review selected to receive feedback was submitted, the agent generated and posted feedback after 1 hour. Reviewers could update their review, optionally, based on the feedback until the end of the review period, which ran from October 14 to November 12, 2024. (B) Feedback categories. Our system is designed to address three main types of review comments. Here, we provide examples of comments that would receive feedback from our agent, as well as examples of the generated feedback. 
(C) Review Feedback Agent. Our system consists of five LLMs (Actors, Aggregator, Critic, and Formmatter). Two parallel Actors generate the initial feedback, then pass it to the Aggregator, the Critic, and finally the Formmatter. Finally, the feedback is passed through the reliability tests; upon successfully passing, the feedback is posted on a review. We provide examples of comments and feedback given to those comments by our system." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "3" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 132 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 132 + ], + "type": "text", + "content": "feedback asks the reviewers to be more specific and actionable); 2) questions or confusions that could be addressed by overlooked parts of the paper (the feedback highlights relevant sections); and 3) unprofessional statements in the review (the feedback asks the reviewer to rephrase). For each comment in a review, the Review Feedback Agent determined if it fell into any of these problematic categories and, if so, provided feedback on that specific review comment." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 133, + 541, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 133, + 541, + 168 + ], + "spans": [ + { + "bbox": [ + 67, + 133, + 541, + 168 + ], + "type": "text", + "content": "Experimental setup: We set up this experiment as a Randomized Control Trial (RCT) to enable us to make causal inferences about how receiving feedback influences the peer review process. Before the beginning of the review period, we randomly split papers into one of three equal groups (see Figure 1A):" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 80, + 174, + 443, + 224 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 81, + 174, + 302, + 186 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 174, + 302, + 186 + ], + "spans": [ + { + "bbox": [ + 81, + 174, + 302, + 186 + ], + "type": "text", + "content": "1. No reviews for this paper will receive feedback," + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 80, + 193, + 443, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 193, + 443, + 205 + ], + "spans": [ + { + "bbox": [ + 80, + 193, + 443, + 205 + ], + "type": "text", + "content": "2. Half of the reviews for this paper will be randomly selected to receive feedback," + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 81, + 212, + 302, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 212, + 302, + 224 + ], + "spans": [ + { + "bbox": [ + 81, + 212, + 302, + 224 + ], + "type": "text", + "content": "3. All reviews for this paper will receive feedback." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 67, + 229, + 541, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 229, + 541, + 301 + ], + "spans": [ + { + "bbox": [ + 67, + 229, + 541, + 301 + ], + "type": "text", + "content": "For reviews randomly assigned to receive feedback, the Review Feedback Agent, wrapped in an API, was automatically triggered when a reviewer first submitted their review on OpenReview. We delayed the feedback generation by one hour after a review was initially submitted to allow reviewers time to make any small edits (e.g., typo corrections). See Figure 1A for an example timeline. The agent posted feedback to reviews through the OpenReview interface by replying to reviews with the feedback wrapped in a comment. See Figure 2 for an example of what feedback looked like on the OpenReview website." + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 144, + 320, + 467, + 630 + ], + "blocks": [ + { + "bbox": [ + 144, + 320, + 467, + 630 + ], + "lines": [ + { + "bbox": [ + 144, + 320, + 467, + 630 + ], + "spans": [ + { + "bbox": [ + 144, + 320, + 467, + 630 + ], + "type": "image", + "image_path": "fe3c200bfa72cd0d795f0d8bd31f374bae1fefefefb74b32eebe2adb5d84a559.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 651, + 541, + 688 + ], + "lines": [ + { + "bbox": [ + 67, + 651, + 541, + 688 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 541, + 688 + ], + "type": "text", + "content": "Figure 2: OpenReview interface. Here, we provide an example of feedback posted to a review on the OpenReview website (with consent from the reviewer). Feedback is only visible to the reviewer and the ICLR program chairs and was posted roughly one hour after the initial review was submitted." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 698, + 541, + 723 + ], + "type": "text", + "content": "The agent only provided feedback on the initial review, and there was no subsequent interaction between the reviewer and the feedback system after that time point. The feedback is only visible to the reviewer" + } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "4" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 131 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 131 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 131 + ], + "type": "text", + "content": "and the ICLR program chairs; it was not shared with other reviewers, authors, or area chairs and was not a factor in the acceptance decisions. Reviewers were informed that the feedback was generated by a LLM and could choose to ignore the feedback or revise their review in response, as the system did not make any direct changes. Finally, we did not access or store any identifiable information about authors or reviewers. This study was reviewed by IRB and deemed low risk." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 133, + 541, + 216 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 133, + 541, + 216 + ], + "spans": [ + { + "bbox": [ + 67, + 133, + 541, + 216 + ], + "type": "text", + "content": "Statistics: Around " + }, + { + "bbox": [ + 67, + 133, + 541, + 216 + ], + "type": "inline_equation", + "content": "50\\%" + }, + { + "bbox": [ + 67, + 133, + 541, + 216 + ], + "type": "text", + "content": " of reviews were randomly selected to receive feedback. Of the 44,831 reviews submitted on 11,553 unique papers (we excluded desk-rejected submissions), we posted feedback to 18,946 reviews " + }, + { + "bbox": [ + 67, + 133, + 541, + 216 + ], + "type": "inline_equation", + "content": "(42.3\\%)" + }, + { + "bbox": [ + 67, + 133, + 541, + 216 + ], + "type": "text", + "content": " over 4 weeks from October 15 to November 12, 2024 (see Figure 2A). Less than " + }, + { + "bbox": [ + 67, + 133, + 541, + 216 + ], + "type": "inline_equation", + "content": "8\\%" + }, + { + "bbox": [ + 67, + 133, + 541, + 216 + ], + "type": "text", + "content": " of the selected reviews did not receive feedback for one of two reasons: 2,692 reviews were originally well-written and did not need feedback, while 829 reviews had feedback that failed the reliability tests. Each review took roughly one minute to run through our entire pipeline and cost around 50 cents. On average, each review that received feedback was given 3-4 feedback comments, with a minimum of 1 and a maximum of 17." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 230, + 244, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 230, + 244, + 243 + ], + "spans": [ + { + "bbox": [ + 69, + 230, + 244, + 243 + ], + "type": "text", + "content": "2.2 Review Feedback Agent" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 251, + 541, + 273 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 251, + 541, + 273 + ], + "spans": [ + { + "bbox": [ + 67, + 251, + 541, + 273 + ], + "type": "text", + "content": "The Review Feedback Agent aimed to provide feedback that helped reviewers make their comments more specific, constructive, and actionable for the authors." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 274, + 541, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 274, + 541, + 357 + ], + "spans": [ + { + "bbox": [ + 67, + 274, + 541, + 357 + ], + "type": "text", + "content": "Feedback categories: The Review Feedback Agent provided suggestions on three potential categories of issues in reviews. We curated these categories by examining reviewer guidelines from several AI conferences [19, 20, 21, 22] and evaluating previously identified patterns of \"lazy reviewer thinking\" [7]. We also took inspiration from the ARR guidelines, where 16 common reviewer heuristics are outlined [23]. Importantly, the agent was not designed to suggest new ideas to add to the review; rather, it only focused on revising the existing ideas and preventing lower-quality reviews. 
The target feedback areas that we ultimately focused on were:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 80, + 368, + 538, + 468 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 81, + 368, + 538, + 391 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 368, + 538, + 391 + ], + "spans": [ + { + "bbox": [ + 81, + 368, + 538, + 391 + ], + "type": "text", + "content": "1. Improving specificity: Encouraging reviewers to rephrase vague review comments, making them more specific, actionable, and justified for the authors." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 80, + 399, + 538, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 399, + 538, + 422 + ], + "spans": [ + { + "bbox": [ + 80, + 399, + 538, + 422 + ], + "type": "text", + "content": "2. Addressing misunderstandings: Highlighting sections of the paper that may already address some of the reviewer's questions or confusion." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 80, + 431, + 538, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 431, + 538, + 468 + ], + "spans": [ + { + "bbox": [ + 80, + 431, + 538, + 468 + ], + "type": "text", + "content": "3. Reducing unprofessional remarks: Identifying and addressing unprofessional or inappropriate remarks in the review. A 2019 study of 1,106 researchers found that " + }, + { + "bbox": [ + 80, + 431, + 538, + 468 + ], + "type": "inline_equation", + "content": "58\\%" + }, + { + "bbox": [ + 80, + 431, + 538, + 468 + ], + "type": "text", + "content": " had received an unprofessional review, highlighting its prevalence [24]." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 477, + 538, + 501 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 477, + 538, + 501 + ], + "spans": [ + { + "bbox": [ + 68, + 477, + 538, + 501 + ], + "type": "text", + "content": "See Figure 1B for examples of real reviewer comments (from ICLR 2024 reviews and public journal reviews) in each category that would receive feedback and examples of feedback that would be given." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 68, + 502, + 538, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 502, + 538, + 536 + ], + "spans": [ + { + "bbox": [ + 68, + 502, + 538, + 536 + ], + "type": "text", + "content": "Preprocessing: The agent was provided with the paper PDF's text (extracted using pypdf's PDFReader [25]) and the review text as input. We extracted the summary, strengths, weaknesses, and questions sections from the review. We did not provide the agent with any of the scores the reviewer initially gave the paper." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "spans": [ + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "text", + "content": "Architecture: The agent generated a list of pairs, with each pair consisting of a review comment that fit into one of the problematic categories above and the corresponding feedback provided for that comment. The agent was composed of a pipeline of five LLMs (see Algorithm 1, Figure 1C). 
We used the Claude Sonnet 3.5 (June 20, 2024) model [11] as the backbone; we picked the backbone model by generating feedback with the same prompt using GPT-4o, Gemini 1.5 Flash, and Claude Sonnet 3.5 and then conducting a blind preference evaluation. Additionally, through testing, we found that one LLM was insufficient to generate high-quality feedback and format it correctly, thus, we instantiated the multi-call pipeline. First, we defined two parallel actor LLMs to generate the initial set of feedback based on the previously defined target areas. The actors were provided with the initial review " + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "inline_equation", + "content": "(R)" + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "text", + "content": " and paper text " + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "inline_equation", + "content": "(P)" + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "text", + "content": " as inputs. We used two separate actors to optimize for feedback diversity. Then, we passed the two lists of feedback, " + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "inline_equation", + "content": "F_{1}" + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "inline_equation", + "content": "F_{2}" + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "text", + "content": ", to an aggregator LLM, which merged the lists into one set of feedback, " + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "inline_equation", + "content": "F_{combined}" + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "text", + "content": ". Next, we passed this candidate list to a critic LLM responsible for ensuring the feedback was accurate and clear. Importantly, the critic also removed any feedback that was too superficial or nitpicky, defined through various in-context examples (see Appendix A for the examples), as we did not want to overwhelm or annoy reviewers. 
Finally, a formatter LLM was provided with this final list, " + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "inline_equation", + "content": "F_{filtered}" + }, + { + "bbox": [ + 68, + 538, + 541, + 716 + ], + "type": "text", + "content": ", and formatted it into pairs:" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 308, + 750 + ], + "type": "text", + "content": "5" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "bbox": [ + 84, + 72, + 340, + 103 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 84, + 72, + 253, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 72, + 253, + 83 + ], + "spans": [ + { + "bbox": [ + 84, + 72, + 253, + 83 + ], + "type": "text", + "content": "- **Reviewer comment:** a comment" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 84, + 91, + 340, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 91, + 340, + 103 + ], + "spans": [ + { + "bbox": [ + 84, + 91, + 340, + 103 + ], + "type": "text", + "content": "- **Feedback to the reviewer:** feedback to the comment" + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "text" + }, + { + "type": "code", + "bbox": [ + 71, + 134, + 328, + 266 + ], + "blocks": [ + { + "bbox": [ + 70, + 121, + 241, + 133 + ], + "lines": [ + { + "bbox": [ + 70, + 121, + 241, + 133 + ], + "spans": [ + { + "bbox": [ + 70, + 121, + 241, + 133 + ], + "type": "text", + "content": "Algorithm 1 Review Feedback Agent" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "lines": [ + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "spans": [ + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": "1: Input: Paper text " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "P" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": ", Review " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "R" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": ", max attempts " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "T = 2" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": " \n2: for " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "t = 1" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": " do \n3: " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "F_{1} \\gets \\mathrm{Actor}_{1}(P, R)" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": " \n4: " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "F_{2} \\gets \\mathrm{Actor}_{2}(P, R)" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": " \n5: " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ],
+ "type": "inline_equation", + "content": "F_{combined} \\gets \\mathrm{Aggregator}(F_{1}, F_{2})" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": " \n6: " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "F_{filtered} \\gets \\mathrm{Critic}(F_{combined})" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": " \n7: " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "F_{final} \\gets \\mathrm{Formatter}(F_{filtered})" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": " \n8: if PassReliabilityTests(Ffinal) then return " + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "inline_equation", + "content": "F_{final}" + }, + { + "bbox": [ + 71, + 134, + 328, + 266 + ], + "type": "text", + "content": " \n9: end if \n10: end for \n11: return error" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + } + ], + "index": 4, + "sub_type": "algorithm" + }, + { + "bbox": [ + 67, + 283, + 541, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 283, + 541, + 354 + ], + "spans": [ + { + "bbox": [ + 67, + 283, + 541, + 354 + ], + "type": "text", + "content": "See Appendix A for the exact prompts used. To refine this system, we constructed a test set of 50 ICLR 2024 reviews we perceived to be of low quality in one or more of our target areas (i.e., they made vague comments, asked questions that were present in the paper already, and/or made unprofessional remarks). We iteratively ran our agent on this test set, examined the generated feedback, and refined the prompts to optimize the results. This procedure ultimately led to prompts that produced high-quality feedback for all 50 reviews in the test set." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 354, + 541, + 461 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 541, + 461 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 541, + 461 + ], + "type": "text", + "content": "Reliability testing: Inspired by [26], we also developed a suite of reliability tests designed to act as guardrails, ensuring the quality of our generated feedback. Reliability tests evaluate specific attributes of a model's output. The four reliability tests we developed ensured the feedback provided constructive suggestions, addressed the reviewer, did not simply restate what the reviewer wrote, and was formatted correctly. We provide the exact reliability tests we used and examples of feedback that would fail the reliability tests in Appendix B. We developed up to five test cases for each reliability test and refined the reliability test prompts until we passed all the test cases. To refine our Review Feedback Agent's pipeline and prompts, we passed our test set reviews through the validated reliability tests until we achieved a " + }, + { + "bbox": [ + 67, + 354, + 541, + 461 + ], + "type": "inline_equation", + "content": "100\\%" + }, + { + "bbox": [ + 67, + 354, + 541, + 461 + ], + "type": "text", + "content": " pass rate." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 462, + 541, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 462, + 541, + 499 + ], + "spans": [ + { + "bbox": [ + 67, + 462, + 541, + 499 + ], + "type": "text", + "content": "Feedback was only posted to a review if it passed all our reliability tests; if it failed, we re-ran the entire pipeline a second time (" + }, + { + "bbox": [ + 67, + 462, + 541, + 499 + ], + "type": "inline_equation", + "content": "T = 2" + }, + { + "bbox": [ + 67, + 462, + 541, + 499 + ], + "type": "text", + "content": ") to generate new feedback. Upon a second fail, we returned an error and did not post the feedback. Over " + }, + { + "bbox": [ + 67, + 462, + 541, + 499 + ], + "type": "inline_equation", + "content": "96\\%" + }, + { + "bbox": [ + 67, + 462, + 541, + 499 + ], + "type": "text", + "content": " of generated feedback for ICLR 2025 reviews passed all reliability tests." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 516, + 149, + 530 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 516, + 149, + 530 + ], + "spans": [ + { + "bbox": [ + 69, + 516, + 149, + 530 + ], + "type": "text", + "content": "3 Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 541, + 481, + 556 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 541, + 481, + 556 + ], + "spans": [ + { + "bbox": [ + 67, + 541, + 481, + 556 + ], + "type": "text", + "content": "3.1 Impact of feedback on review updates and reviewer engagement" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 561, + 541, + 597 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 561, + 541, + 597 + ], + "spans": [ + { + "bbox": [ + 67, + 561, + 541, + 597 + ], + "type": "text", + "content": "First, we aimed to objectively measure how many reviewers updated their reviews after receiving feedback compared to those who did not receive feedback. This enabled us to assess how the feedback may have been associated with changes in various components of their review, such as length and scores." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 597, + 541, + 668 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 597, + 541, + 668 + ], + "spans": [ + { + "bbox": [ + 67, + 597, + 541, + 668 + ], + "type": "text", + "content": "We conducted this ICLR experiment as a randomized controlled study by randomly splitting all reviews into one of two groups: not selected to receive feedback (control group) or selected to receive feedback (feedback group) - see Section 2 for more details. Note that the group selected to receive feedback includes the " + }, + { + "bbox": [ + 67, + 597, + 541, + 668 + ], + "type": "inline_equation", + "content": "7.9\\%" + }, + { + "bbox": [ + 67, + 597, + 541, + 668 + ], + "type": "text", + "content": " of reviews that were selected but did not actually receive feedback, mostly because AI deemed feedback not necessary there. This intent-to-treat definition of the feedback group enables us to conduct causal analysis but could dilute the actual effect of the feedback." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 669, + 541, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 669, + 541, + 717 + ], + "spans": [ + { + "bbox": [ + 67, + 669, + 541, + 717 + ], + "type": "text", + "content": "Of all reviews in the feedback group, we further defined reviews that successfully received feedback as either being not updated or updated. A review is not updated if a reviewer did not edit their review after receiving feedback or if the edit distance between the initial and modified review was less than 5; this edit distance filtering accounted for minor updates such as fixing typos or modifying scores. Conversely, a review" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "6" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 113, + 140, + 282, + 417 + ], + "blocks": [ + { + "bbox": [ + 96, + 138, + 104, + 145 + ], + "lines": [ + { + "bbox": [ + 96, + 138, + 104, + 145 + ], + "spans": [ + { + "bbox": [ + 96, + 138, + 104, + 145 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 113, + 140, + 282, + 417 + ], + "lines": [ + { + "bbox": [ + 113, + 140, + 282, + 417 + ], + "spans": [ + { + "bbox": [ + 113, + 140, + 282, + 417 + ], + "type": "image", + "image_path": "c390e294f465635e741d8f40fc61356f4edd26a8fd74e07132339f3a99ecf6e9.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "lines": [ + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "spans": [ + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "text", + "content": "Figure 3: (A) Feedback statistics. Among all ICLR 2025 reviews, 22,467 were randomly selected to receive feedback (feedback group), and 22,364 were randomly selected not to receive feedback (control group). Of those selected to receive feedback, 18,946 " + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "inline_equation", + "content": "(42.3\\%)" + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "text", + "content": " successfully received feedback, with " + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "inline_equation", + "content": "26.6\\%" + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "text", + "content": " of those reviewers updating their reviews. (B) Update rates. (Top) Most reviews were submitted 2-3 days before the review deadline (November 4, 2024). (Bottom) Reviewers were more likely to update their review if they submitted it early relative to the deadline. Reviewers who received feedback were much more likely to update their reviews than those in the control group, with a difference of approximately 17 percentage points. (C) Average change in review length (measured as number of words). Review length is measured only for the following sections: summary, strengths, weaknesses, and questions. 
The difference in review length between the control and feedback groups is statistically significant " + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "inline_equation", + "content": "(^{**}\\mathrm{p} \\leq 0.01)" + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "text", + "content": ", with being selected to receive feedback leading to an average increase of 14 words more (a " + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "inline_equation", + "content": "200\\%" + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "text", + "content": " increase) in review length compared to the control group. The difference is more pronounced between the not-updated and updated groups " + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "inline_equation", + "content": "(^{***}\\mathrm{p} \\leq 0.001)" + }, + { + "bbox": [ + 67, + 500, + 541, + 654 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 309, + 143, + 512, + 414 + ], + "blocks": [ + { + "bbox": [ + 291, + 138, + 298, + 145 + ], + "lines": [ + { + "bbox": [ + 291, + 138, + 298, + 145 + ], + "spans": [ + { + "bbox": [ + 291, + 138, + 298, + 145 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 309, + 143, + 512, + 414 + ], + "lines": [ + { + "bbox": [ + 309, + 143, + 512, + 414 + ], + "spans": [ + { + "bbox": [ + 309, + 143, + 512, + 414 + ], + "type": "image", + "image_path": "e626483d5b51e3bf27cb3db70cce10e0b7472c4c1220b887075883a755b16eb6.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "table", + "bbox": [ + 105, + 454, + 504, + 491 + ], + "blocks": [ + { + "bbox": [ + 94, + 437, + 102, + 445 + ], + "lines": [ + { + "bbox": [ + 94, + 437, + 102, + 445 + ], + "spans": [ + { + "bbox": [ + 94, + 437, + 102, + 445 + ], + "type": "text", + "content": "C" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 105, + 454, + 504, + 491 + ], + "lines": [ + { + "bbox": [ + 105, + 454, + 504, + 491 + ], + "spans": [ + { + "bbox": [ + 105, + 454, + 504, + 491 + ], + "type": "table", + "html": "
<table>
<tr><td></td><td>Control</td><td>Feedback</td><td>Not updated</td><td>Updated</td></tr>
<tr><td>Average change in length</td><td>7.0</td><td>21.0** (Δ + 200%)</td><td>2.1</td><td>80.3***</td></tr>
</table>
", + "image_path": "64415fd85d0b933fa308e605b768e68b5bb8236d64a7550c074a78efa794cf12.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_body" + } + ], + "index": 5 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 308, + 750 + ], + "type": "text", + "content": "7" + } + ] + } + ], + "index": 7 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 72, + 541, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 72, + 541, + 95 + ], + "spans": [ + { + "bbox": [ + 68, + 72, + 541, + 95 + ], + "type": "text", + "content": "is updated if a reviewer did edit their review after receiving feedback and the edit distance between the initial and modified review was greater than 5." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 96, + 541, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 96, + 541, + 167 + ], + "spans": [ + { + "bbox": [ + 68, + 96, + 541, + 167 + ], + "type": "text", + "content": "Of the 18,946 reviews that successfully received feedback, 5,031 (26.6%) reviews were updated (Figure 3A). Out of the 22,364 reviews in the control group, only 2,103 (9.4%) were updated; here, we define updated for the control group as a reviewer updating at least one hour after posting (the time it takes for the feedback group to receive feedback) with an edit distance greater than 5. With an update rate difference of roughly 17 percentage points (Figure 3B), we can see that reviews that received feedback were much more likely to be updated than those that did not." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 168, + 541, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 168, + 541, + 263 + ], + "spans": [ + { + "bbox": [ + 68, + 168, + 541, + 263 + ], + "type": "text", + "content": "In Figure 3B, we also see that reviewers who submitted early relative to the deadline (November 4, 2024) were more likely to update their review than those who submitted close to or after the deadline. This suggests that more organized reviewers, who may already be more engaged in the review process, were more likely to revise their reviews in response to feedback. While this will influence our analysis comparing the not updated and updated groups, we can be confident that the underlying distribution of the control and feedback groups is similar and not biased by factors such as reviewer organization because we conducted this as an RCT. Randomization helps mitigate such biases, making it possible to assess the causal impact of the feedback on the peer review process." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "spans": [ + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "text", + "content": "Finally, we analyzed the change in review length (number of words in the summary, strengths, weaknesses, and questions sections) among the groups (Figure 3C). We compared the initial review length and the modified review length; we refer to modified reviews as the review at the end of the four-week review period before the rebuttal period began (only these modified reviews are made public to authors). 
We saw that review length, on average, increased across all groups. First, we observed that being selected to receive feedback caused the average review length to increase by about 14 words more than reviews that were not selected to receive feedback. Note that this effect size is deflated due to the substantial number of reviewers who received feedback but did not update their review, as well as the " + }, + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "inline_equation", + "content": "7.9\\%" + }, + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "text", + "content": " of reviews that were selected to receive feedback but did not actually get it. We also see that updating the review after receiving feedback is associated with a statistically significant increase in review length (80 words) compared to not updating the review (2 words). We can infer that reviewers who updated their reviews were editing them more consistently to incorporate more detail and nuance, explaining this large increase in length. In addition to feedback causing an increase in review length, we also found that a significantly higher percentage of reviewers who received feedback edited at least one of their scores (soundness, presentation, contribution, rating, and confidence) during the review period, with " + }, + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "inline_equation", + "content": "8.1\\%" + }, + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "text", + "content": " of them making edits compared to " + }, + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "inline_equation", + "content": "7.5\\%" + }, + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "text", + "content": " among the control group " + }, + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "inline_equation", + "content": "(p \\leq 0.05)" + }, + { + "bbox": [ + 70, + 264, + 541, + 468 + ], + "type": "text", + "content": ". In Appendix C, we observe no significant difference in the average score changes between the feedback and control groups." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 68, + 482, + 416, + 495 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 482, + 416, + 495 + ], + "spans": [ + { + "bbox": [ + 68, + 482, + 416, + 495 + ], + "type": "text", + "content": "3.2 Measuring how much feedback reviewers incorporate" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 68, + 502, + 541, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 502, + 541, + 536 + ], + "spans": [ + { + "bbox": [ + 68, + 502, + 541, + 536 + ], + "type": "text", + "content": "Of the reviewers that updated their review, we wanted to measure what proportion of them incorporated one or more pieces of feedback they were provided. This analysis helped us estimate how many reviewers found the feedback useful." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 537, + 541, + 692 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 537, + 541, + 692 + ], + "spans": [ + { + "bbox": [ + 70, + 537, + 541, + 692 + ], + "type": "text", + "content": "We counted a piece of feedback as incorporated if the reviewer clearly integrated some part of the feedback into their modified review. To systematically carry out this analysis, we developed an LLM-based pipeline to run on all updated reviews (see Supplementary Figure S2A). 
We used the Claude Sonnet 3.5 model to evaluate whether each feedback item received by a reviewer was incorporated into their modified review. See Appendix D for our approach to validating this pipeline. Of the 5,031 reviews that reviewers updated, encompassing 18,322 total feedback items, " + }, + { + "bbox": [ + 70, + 537, + 541, + 692 + ], + "type": "inline_equation", + "content": "89\\%" + }, + { + "bbox": [ + 70, + 537, + 541, + 692 + ], + "type": "text", + "content": " of reviewers incorporated at least one piece of feedback. This represents " + }, + { + "bbox": [ + 70, + 537, + 541, + 692 + ], + "type": "inline_equation", + "content": "23.6\\%" + }, + { + "bbox": [ + 70, + 537, + 541, + 692 + ], + "type": "text", + "content": " of all reviewers who received feedback (Figure 4A). In total, we estimate that 12,222 feedback items were incorporated into revised reviews. We also examined the number of feedback items reviewers who updated their reviews received compared to how many feedback items they incorporated (Figure 4B). We see that when reviewers receive fewer feedback items, they are more likely to incorporate more (or even all) of the items. Overall, the average reviewer who updated their review incorporated " + }, + { + "bbox": [ + 70, + 537, + 541, + 692 + ], + "type": "inline_equation", + "content": "69.3\\%" + }, + { + "bbox": [ + 70, + 537, + 541, + 692 + ], + "type": "text", + "content": " of the feedback they received; in other words, given 3 pieces of feedback, the average reviewer who updated their review incorporated 2 of them." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 693, + 541, + 717 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 693, + 541, + 717 + ], + "spans": [ + { + "bbox": [ + 68, + 693, + 541, + 717 + ], + "type": "text", + "content": "Below are a few examples of real incorporations reviewers made based on their feedback. We provide the initial review comment they posted, the feedback they received, and then their modified comment."
+ } + ] + } + ], + "index": 7 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 742, + 309, + 750 + ], + "type": "text", + "content": "8" + } + ] + } + ], + "index": 8 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 94, + 117, + 269, + 245 + ], + "blocks": [ + { + "bbox": [ + 94, + 73, + 100, + 81 + ], + "lines": [ + { + "bbox": [ + 94, + 73, + 100, + 81 + ], + "spans": [ + { + "bbox": [ + 94, + 73, + 100, + 81 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 94, + 117, + 269, + 245 + ], + "lines": [ + { + "bbox": [ + 94, + 117, + 269, + 245 + ], + "spans": [ + { + "bbox": [ + 94, + 117, + 269, + 245 + ], + "type": "image", + "image_path": "39d45c4bf6501e803d5caa9078adf127fb44c19ce3e84bc278beda887ea50cc2.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 284, + 110, + 506, + 273 + ], + "blocks": [ + { + "bbox": [ + 279, + 73, + 285, + 80 + ], + "lines": [ + { + "bbox": [ + 279, + 73, + 285, + 80 + ], + "spans": [ + { + "bbox": [ + 279, + 73, + 285, + 80 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 317, + 86, + 505, + 97 + ], + "lines": [ + { + "bbox": [ + 317, + 86, + 505, + 97 + ], + "spans": [ + { + "bbox": [ + 317, + 86, + 505, + 97 + ], + "type": "text", + "content": "Of 12,222 total feedback items incorporated:" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 284, + 110, + 506, + 273 + ], + "lines": [ + { + "bbox": [ + 284, + 110, + 506, + 273 + ], + "spans": [ + { + "bbox": [ + 284, + 110, + 506, + 273 + ], + "type": "image", + "image_path": "574ace002b44edf3e53c3c7f3c729f721aaf7f0ee5bad955cd6216180e7b5ca4.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 290, + 541, + 374 + ], + "lines": [ + { + "bbox": [ + 67, + 290, + 541, + 374 + ], + "spans": [ + { + "bbox": [ + 67, + 290, + 541, + 374 + ], + "type": "text", + "content": "Figure 4: (A) Overall incorporation statistics. Through our LLM-based incorporation analysis, we estimate that " + }, + { + "bbox": [ + 67, + 290, + 541, + 374 + ], + "type": "inline_equation", + "content": "23.6\\%" + }, + { + "bbox": [ + 67, + 290, + 541, + 374 + ], + "type": "text", + "content": " of reviewers who were given feedback incorporated at least one feedback item they were given. This means that " + }, + { + "bbox": [ + 67, + 290, + 541, + 374 + ], + "type": "inline_equation", + "content": "89\\%" + }, + { + "bbox": [ + 67, + 290, + 541, + 374 + ], + "type": "text", + "content": " of reviewers who updated their review after receiving feedback incorporated at least one item. (B) Feedback incorporation trends. Here, we illustrate the relationship between the number of feedback items reviewers who updated their review received and how many of those items they incorporated. In total, reviewers incorporated 12,222 feedback items. Notably, reviewers were more likely to incorporate feedback when given fewer items." 
+ } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 396, + 352, + 408 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 396, + 352, + 408 + ], + "spans": [ + { + "bbox": [ + 85, + 396, + 352, + 408 + ], + "type": "text", + "content": "Incorporation example: Feedback that encourages specificity" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 417, + 525, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 417, + 525, + 441 + ], + "spans": [ + { + "bbox": [ + 83, + 417, + 525, + 441 + ], + "type": "text", + "content": "Original comment: Theorems 1, 2, 3 require the notion of interpolation, which is considered a strong assumption to have." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 451, + 525, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 451, + 525, + 510 + ], + "spans": [ + { + "bbox": [ + 83, + 451, + 525, + 510 + ], + "type": "text", + "content": "Feedback: To make this critique more actionable, it would be helpful to elaborate on why the interpolation assumption is problematic in this context. Are there particular types of federated learning problems where this assumption is unlikely to hold? Providing concrete examples or suggestions for how the authors could relax or justify this assumption would give them clearer guidance on addressing this limitation." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 521, + 525, + 641 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 521, + 525, + 641 + ], + "spans": [ + { + "bbox": [ + 83, + 521, + 525, + 641 + ], + "type": "text", + "content": "Revised comment: Theorems 1, 2, 3 require the notion of interpolation. Even though an explanation of regimes that satisfy this condition is provided, considering that there are previous works [1], [2] that extend beyond that setting, this assumption seems to be an avenue for future work in this field. More specifically, the initial FedProx algorithm [1] is analyzed in the general non-interpolated setting. In addition, the follow-up work regarding the FedExProx algorithm [2] considers in the main paper the interpolated regime. However, the authors provide additionally an illustration of the algorithm's behaviour in the non-interpolated setting (see Appendix F.3 in [2]). In that sense, it would be useful to provide some additional details on the behaviour of the algorithm in the non-interpolated setting or to comment on the main challenges in extending the current proof technique beyond the interpolation framework, offering in that way a more complete picture and direction for future research." 
+ } + ] + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "spans": [ + { + "bbox": [ + 302, + 741, + 309, + 750 + ], + "type": "text", + "content": "9" + } + ] + } + ], + "index": 10 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 85, + 73, + 374, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 73, + 374, + 85 + ], + "spans": [ + { + "bbox": [ + 85, + 73, + 374, + 85 + ], + "type": "text", + "content": "Incorporation example: Feedback that identifies content oversight" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 85, + 95, + 385, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 95, + 385, + 106 + ], + "spans": [ + { + "bbox": [ + 85, + 95, + 385, + 106 + ], + "type": "text", + "content": "Original reviewer comment: How is the performance on Llama?" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 117, + 525, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 117, + 525, + 152 + ], + "spans": [ + { + "bbox": [ + 84, + 117, + 525, + 152 + ], + "type": "text", + "content": "Feedback to the reviewer: The paper appears to provide results on Llama 2-7B in Table 3. Does this address your question? If not, could you clarify what specific aspects of performance on Llama you're interested in that aren't covered by the current results?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 85, + 163, + 499, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 163, + 499, + 175 + ], + "spans": [ + { + "bbox": [ + 85, + 163, + 499, + 175 + ], + "type": "text", + "content": "Modified reviewer comment: How is the performance on Llama3 with the alpaca dataset?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 85, + 192, + 389, + 204 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 192, + 389, + 204 + ], + "spans": [ + { + "bbox": [ + 85, + 192, + 389, + 204 + ], + "type": "text", + "content": "Incorporation example: Feedback that makes review more actionable" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 213, + 524, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 213, + 524, + 238 + ], + "spans": [ + { + "bbox": [ + 85, + 213, + 524, + 238 + ], + "type": "text", + "content": "Original reviewer comment: Limited application focus on other visual tasks (e.g., object detection, segmentation) beyond classification." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 84, + 247, + 525, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 247, + 525, + 295 + ], + "spans": [ + { + "bbox": [ + 84, + 247, + 525, + 295 + ], + "type": "text", + "content": "Feedback to the reviewer: To make this comment more actionable, consider asking the authors to discuss potential extensions or challenges in applying their approach to other visual tasks. For example: 'How might the visual memory approach be adapted for tasks like object detection or segmentation? 
What unique challenges would arise in those domains?'" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 84, + 305, + 525, + 342 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 305, + 525, + 342 + ], + "spans": [ + { + "bbox": [ + 84, + 305, + 525, + 342 + ], + "type": "text", + "content": "Modified reviewer comment: Limited application focus on other visual tasks (e.g., object detection, segmentation) beyond classification. How might the visual memory approach be adapted for tasks like object detection or segmentation? What unique challenges would arise in those domains?" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 355, + 541, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 355, + 541, + 453 + ], + "spans": [ + { + "bbox": [ + 67, + 355, + 541, + 453 + ], + "type": "text", + "content": "Finally, we wanted to assess whether these incorporated reviews were clearer, more specific, and more actionable for authors. To conduct this analysis, we asked two human AI researchers to conduct a blind preference evaluation between the initial and modified pre-rebuttal reviews. Specifically, we focused on reviews in the updated group that received 3-4 feedback items (the average number given) where the proportion of incorporated feedback exceeded 0.60. This threshold was chosen because the average incorporation rate was " + }, + { + "bbox": [ + 67, + 355, + 541, + 453 + ], + "type": "inline_equation", + "content": "67\\%" + }, + { + "bbox": [ + 67, + 355, + 541, + 453 + ], + "type": "text", + "content": ", and we aimed to assess whether an average updated review with incorporated feedback was perceived as an improvement. Human annotators preferred modified reviews " + }, + { + "bbox": [ + 67, + 355, + 541, + 453 + ], + "type": "inline_equation", + "content": "89\\%" + }, + { + "bbox": [ + 67, + 355, + 541, + 453 + ], + "type": "text", + "content": " of the time (out of 100 examples), indicating that reviewers who incorporated feedback consistently produced higher-quality reviews." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 466, + 385, + 478 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 466, + 385, + 478 + ], + "spans": [ + { + "bbox": [ + 68, + 466, + 385, + 478 + ], + "type": "text", + "content": "3.3 Influence of feedback on rebuttals and decisions" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 486, + 541, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 486, + 541, + 534 + ], + "spans": [ + { + "bbox": [ + 67, + 486, + 541, + 534 + ], + "type": "text", + "content": "We next analyzed the impact of being selected to receive feedback on the rebuttal process and decision outcomes. The rebuttal period took place over three weeks between November 12 and December 4, 2024, and was a time when authors could respond to their reviewer's comments as they revised their papers. We examined how the feedback causally impacted different engagement measures during the rebuttal period." + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 83, + 543, + 523, + 593 + ], + "blocks": [ + { + "bbox": [ + 83, + 543, + 523, + 593 + ], + "lines": [ + { + "bbox": [ + 83, + 543, + 523, + 593 + ], + "spans": [ + { + "bbox": [ + 83, + 543, + 523, + 593 + ], + "type": "table", + "html": "
<table>
<tr><td></td><td>Control</td><td>Feedback</td><td>Not updated</td><td>Updated</td></tr>
<tr><td>Average length of author rebuttal</td><td>807</td><td>855*** (Δ + 6%)</td><td>840</td><td>896***</td></tr>
<tr><td>Average length of reviewer replies</td><td>110</td><td>116*** (Δ + 5.5%)</td><td>115</td><td>129***</td></tr>
</table>
", + "image_path": "af1bd13c6bc85f999f39c1860d84dfd23f079d50200f0c7318b39fdd1a4c968b.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 601, + 541, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 601, + 541, + 673 + ], + "spans": [ + { + "bbox": [ + 67, + 601, + 541, + 673 + ], + "type": "text", + "content": "Table 1: Average change in rebuttal and reply length (measured as number of words). We observe that being selected to receive feedback causally increased the length of author rebuttals by an average of 48 words " + }, + { + "bbox": [ + 67, + 601, + 541, + 673 + ], + "type": "inline_equation", + "content": "(6\\%;^{**}\\mathrm{p}\\leq 0.001)" + }, + { + "bbox": [ + 67, + 601, + 541, + 673 + ], + "type": "text", + "content": " for reviews written by reviewers who were selected to receive feedback, compared to those who were not. We also see that the average length of reviewer replies to author rebuttals is significantly longer among those who were selected to receive feedback, with an average increase of 6 words " + }, + { + "bbox": [ + 67, + 601, + 541, + 673 + ], + "type": "inline_equation", + "content": "(5.5\\%;^{**}\\mathrm{p}\\leq 0.001)" + }, + { + "bbox": [ + 67, + 601, + 541, + 673 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 68, + 685, + 541, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 685, + 541, + 721 + ], + "spans": [ + { + "bbox": [ + 68, + 685, + 541, + 721 + ], + "type": "text", + "content": "In the first row of Table 1, we observed that authors posted rebuttals that were, on average, " + }, + { + "bbox": [ + 68, + 685, + 541, + 721 + ], + "type": "inline_equation", + "content": "6\\%" + }, + { + "bbox": [ + 68, + 685, + 541, + 721 + ], + "type": "text", + "content": " longer (48 words) to reviews written by reviewers who were selected to receive feedback, which is significantly longer than those posted to reviews in the control group. In other words, authors were generally more engaged when" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "10" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 144 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 144 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 144 + ], + "type": "text", + "content": "their reviewer was selected to receive feedback. This could be because the feedback led to clearer and more actionable reviews, allowing authors to more effectively address and respond to the reviewer's comments with more detailed rebuttals. In the second row of Table 1, we also saw that reviewers who were selected to receive feedback responded to these rebuttals with replies that were, on average, " + }, + { + "bbox": [ + 67, + 72, + 541, + 144 + ], + "type": "inline_equation", + "content": "5.5\\%" + }, + { + "bbox": [ + 67, + 72, + 541, + 144 + ], + "type": "text", + "content": " longer (6 words) than those who were not selected, again highlighting increased engagement among reviewers if they were in the feedback group." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 144, + 541, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 144, + 541, + 215 + ], + "spans": [ + { + "bbox": [ + 67, + 144, + 541, + 215 + ], + "type": "text", + "content": "This increased engagement is reflected in the percentage of reviewers who edited one or more of their scores for a paper during the rebuttal period. We found that " + }, + { + "bbox": [ + 67, + 144, + 541, + 215 + ], + "type": "inline_equation", + "content": "31.7\\%" + }, + { + "bbox": [ + 67, + 144, + 541, + 215 + ], + "type": "text", + "content": " of reviewers who received feedback edited their scores, compared to " + }, + { + "bbox": [ + 67, + 144, + 541, + 215 + ], + "type": "inline_equation", + "content": "30.6\\%" + }, + { + "bbox": [ + 67, + 144, + 541, + 215 + ], + "type": "text", + "content": " of those who did not, consistent with receiving feedback being associated with greater reviewer-author engagement. Overall, these findings lead us to conclude that authors were better able to address their reviewers' original concerns during the rebuttal period if their reviewer was selected to receive feedback, leading to more engagement and satisfaction among both groups." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 216, + 541, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 216, + 541, + 312 + ], + "spans": [ + { + "bbox": [ + 67, + 216, + 541, + 312 + ], + "type": "text", + "content": "Finally, we evaluated whether papers with reviews that were selected to receive feedback had a different acceptance rate than those that were not. We compared the acceptance rates of the control and feedback groups, defining the control group as all papers where no reviews were selected to receive feedback and the feedback group as those where at least one review was selected to receive feedback. While there was a slightly higher acceptance rate of " + }, + { + "bbox": [ + 67, + 216, + 541, + 312 + ], + "type": "inline_equation", + "content": "32.3\\%" + }, + { + "bbox": [ + 67, + 216, + 541, + 312 + ], + "type": "text", + "content": " among papers in the feedback group, compared to " + }, + { + "bbox": [ + 67, + 216, + 541, + 312 + ], + "type": "inline_equation", + "content": "30.8\\%" + }, + { + "bbox": [ + 67, + 216, + 541, + 312 + ], + "type": "text", + "content": " among the control group, this difference was not statistically significant. This indicates that while receiving feedback promoted more engaged and thorough discussions among reviewers and authors, it did not substantially change acceptance rates." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 325, + 372, + 339 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 325, + 372, + 339 + ], + "spans": [ + { + "bbox": [ + 68, + 325, + 372, + 339 + ], + "type": "text", + "content": "3.4 Clustering analysis of the feedback comments" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 70, + 388, + 231, + 475 + ], + "blocks": [ + { + "bbox": [ + 72, + 356, + 81, + 365 + ], + "lines": [ + { + "bbox": [ + 72, + 356, + 81, + 365 + ], + "spans": [ + { + "bbox": [ + 72, + 356, + 81, + 365 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 70, + 388, + 231, + 475 + ], + "lines": [ + { + "bbox": [ + 70, + 388, + 231, + 475 + ], + "spans": [ + { + "bbox": [ + 70, + 388, + 231, + 475 + ], + "type": "image", + "image_path": "e9b2135417f8a4a3154218044a289992001ca73745da1559da65cd735992daca.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 228, + 365, + 358, + 488 + ], + "blocks": [ + { + "bbox": [ + 228, + 365, + 358, + 488 + ], + "lines": [ + { + "bbox": [ + 228, + 365, + 358, + 488 + ], + "spans": [ + { + "bbox": [ + 228, + 365, + 358, + 488 + ], + "type": "image", + "image_path": "7c456f8feec7c678459b5c72845568ab8bcd9659db979be35d064f3fe484cfb5.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 359, + 357, + 365, + 365 + ], + "lines": [ + { + "bbox": [ + 359, + 357, + 365, + 365 + ], + "spans": [ + { + "bbox": [ + 359, + 357, + 365, + 365 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 371, + 367, + 536, + 488 + ], + "blocks": [ + { + "bbox": [ + 371, + 367, + 536, + 488 + ], + "lines": [ + { + "bbox": [ + 371, + 367, + 536, + 488 + ], + "spans": [ + { + "bbox": [ + 371, + 367, + 536, + 488 + ], + "type": "image", + "image_path": "4dc4fc56dbe55e0e8da3ad87e006978c1a53d8a96d528471693d75f4bb8980bb.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 522, + 541, + 618 + ], + "lines": [ + { + "bbox": [ + 67, + 522, + 541, + 618 + ], + "spans": [ + { + "bbox": [ + 67, + 522, + 541, + 618 + ], + "type": "text", + "content": "Figure 5: (A) Feedback clusters. We used an LLM to group all the feedback items we provided to reviewers into five distinct clusters based on the text. We found that nearly half of the feedback was directed at asking the reviewer to 'clarify methodological concerns to make their request specific and actionable.' The next most popular cluster was feedback asking the reviewer to 'clarify their request by adding specific analyses, baselines, or references.' (B) Incorporation rate by cluster. We measured the percentage of feedback items within each cluster that reviewers incorporated. Overall, " + }, + { + "bbox": [ + 67, + 522, + 541, + 618 + ], + "type": "inline_equation", + "content": "17.7\\%" + }, + { + "bbox": [ + 67, + 522, + 541, + 618 + ], + "type": "text", + "content": " of all feedback was incorporated. 
When examined by cluster, incorporation rates ranged from " + }, + { + "bbox": [ + 67, + 522, + 541, + 618 + ], + "type": "inline_equation", + "content": "14\\%" + }, + { + "bbox": [ + 67, + 522, + 541, + 618 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 522, + 541, + 618 + ], + "type": "inline_equation", + "content": "18\\%" + }, + { + "bbox": [ + 67, + 522, + 541, + 618 + ], + "type": "text", + "content": ", with no statistically significant differences observed." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 627, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 627, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 627, + 541, + 723 + ], + "type": "text", + "content": "To gain more insights into what types of feedback were provided by the AI agent, we present a quantitative clustering analysis of the feedback generated. Of the 69,836 total feedback items the agent gave to reviewers, we characterized the feedback by generating distinct clusters. Inspired by [27, 28], we generated five clusters on 1000 randomly sampled feedback items using an LLM; we repeated this process twice to ensure robustness. Of the 10 candidate clusters, we prompted a second LLM to select five non-overlapping clusters, giving us the five clusters we present below. Finally, we prompted a third LLM to assign the feedback items to one of the five clusters so we could determine what proportion of feedback belonged to each cluster. We depict the five clusters in Figure 5A, as well as examples of feedback items that would fall into each cluster in Table 2." + } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "11" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 69, + 563, + 526 + ], + "blocks": [ + { + "bbox": [ + 70, + 69, + 563, + 526 + ], + "lines": [ + { + "bbox": [ + 70, + 69, + 563, + 526 + ], + "spans": [ + { + "bbox": [ + 70, + 69, + 563, + 526 + ], + "type": "table", + "html": "
ClusterReviewer commentFeedback to the reviewer
Clarify methodological concerns to make request specific and actionableThe experimental setup and tool flow used to conduct the experiments should be described in more detail.To make this feedback more actionable, it would be helpful to specify what aspects of the experimental setup and tool flow you feel are missing. For example, are you looking for more details on the hardware specifications used for CPU/GPU/FPGA experiments, software frameworks and versions, training hyperparameters, or quantization settings?
Clarify request by adding specific analyses, baselines, or referencesAll the experiment examples are numerical and toy, and there is a large mismatch with real physical engineering systems. Therefore, I do not think they are convincing.To make this critique more constructive, you could suggest specific real-world physical systems or benchmarks that would provide a more convincing evaluation of the method. This would give the authors concrete ideas for improving their experimental validation.
Clarify suggestions to visuals or formatA few suggestions, Fig. 2 is very hard to understand. Is there another way to convey the same information which is easier to read?To make this feedback more constructive, try to articulate what specific aspects of Figure 2 are difficult to understand. You could suggest alternative visualization formats or specific elements that need clarification. This would help the authors know how to improve the figure.
Request authors expand on methodological limitations or broader implicationsCSP can only support binary labels.To make this comment more constructive, you could ask the authors to discuss the implications of this limitation for real-world applications and whether they have considered any extensions to support multi-class problems.
Clarify desired improvements to theoretical analysis or proofsHow standard are the assumptions (Assumptions C.1 and C.2) required for proving Theorems 4.1 and 4.2? It would be helpful to see more justification regarding their validity (e.g. other papers building based upon similar assumptions).This is an excellent question that probes the theoretical foundations of the paper. To make it more actionable, you could ask the authors to provide specific examples of other works in the field that rely on similar assumptions, or to explain how these assumptions compare to those typically used in related theoretical analyses. This would help contextualize the theoretical contributions within the broader literature.
", + "image_path": "b87b6b6a84e207829cfe46a705418008d45b8586d798978d7b7854b20f519099.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 538, + 541, + 563 + ], + "lines": [ + { + "bbox": [ + 68, + 538, + 541, + 563 + ], + "spans": [ + { + "bbox": [ + 68, + 538, + 541, + 563 + ], + "type": "text", + "content": "Table 2: Examples of AI-generated feedback that belong to each of the five main clusters. We also provide the original review comment that triggered the generation of the feedback." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 582, + 541, + 654 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 582, + 541, + 654 + ], + "spans": [ + { + "bbox": [ + 67, + 582, + 541, + 654 + ], + "type": "text", + "content": "These clusters indicate that the vast majority of feedback provided was addressed towards vague review comments and aimed to make them more specific, actionable, and justified. We saw that the agent rarely chose to comment on content misunderstandings, in large part because it had to be absolutely certain there was an error and provide a direct quote from the paper highlighting the mistake as we did not tolerate any hallucinations. Therefore, we saw that the model would err on the side of caution and not provide many comments related to that category." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 654, + 541, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 654, + 541, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 654, + 541, + 715 + ], + "type": "text", + "content": "We also sought to measure the percentage of feedback items within each cluster that were incorporated by reviewers, as shown in Figure 5B. Overall, out of the 69,836 feedback items given, we found that " + }, + { + "bbox": [ + 67, + 654, + 541, + 715 + ], + "type": "inline_equation", + "content": "17.7\\%" + }, + { + "bbox": [ + 67, + 654, + 541, + 715 + ], + "type": "text", + "content": " of all feedback was incorporated. On a cluster basis, we found that the 'clarify request by adding specific analyses, baselines, or references' and 'clarify desired improvements to theoretical analysis or proofs' clusters had the highest incorporation rate at " + }, + { + "bbox": [ + 67, + 654, + 541, + 715 + ], + "type": "inline_equation", + "content": "18\\%" + }, + { + "bbox": [ + 67, + 654, + 541, + 715 + ], + "type": "text", + "content": ". The 'clarify suggestions to visuals or format' cluster had the" + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 750 + ], + "type": "text", + "content": "12" + } + ] + } + ], + "index": 4 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 542, + 109 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 542, + 109 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 542, + 109 + ], + "type": "text", + "content": "lowest incorporation rate at " + }, + { + "bbox": [ + 67, + 72, + 542, + 109 + ], + "type": "inline_equation", + "content": "14\\%" + }, + { + "bbox": [ + 67, + 72, + 542, + 109 + ], + "type": "text", + "content": ". 
Overall, we do not see statistically significant differences in incorporation rates among the clusters, implying that reviewers did not find certain categories of feedback to be more or less useful than others." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 126, + 203, + 143 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 126, + 203, + 143 + ], + "spans": [ + { + "bbox": [ + 69, + 126, + 203, + 143 + ], + "type": "text", + "content": "4 Related Works" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 151, + 541, + 223 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 151, + 541, + 223 + ], + "spans": [ + { + "bbox": [ + 67, + 151, + 541, + 223 + ], + "type": "text", + "content": "Due to their extensive capabilities, LLMs are being used across every stage of the peer review process. Reviewers increasingly use LLMs to assist in drafting peer reviews [15, 29, 30]. An estimated " + }, + { + "bbox": [ + 67, + 151, + 541, + 223 + ], + "type": "inline_equation", + "content": "17.5\\%" + }, + { + "bbox": [ + 67, + 151, + 541, + 223 + ], + "type": "text", + "content": " of authors of Computer Science abstracts on arXiv [31] and " + }, + { + "bbox": [ + 67, + 151, + 541, + 223 + ], + "type": "inline_equation", + "content": "10.6\\%" + }, + { + "bbox": [ + 67, + 151, + 541, + 223 + ], + "type": "text", + "content": " of reviewers at ICLR 2024 [16] used LLMs for writing assistance. Other studies have shown the potential of LLMs to make the entire review pipeline more efficient across various stages [32, 33, 34, 35] such as writing manuscripts [36], initial quality control [37, 38, 27], and even providing AI-generated instructions for how to write reviews [39]." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "spans": [ + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "text", + "content": "As peer review workloads continue to increase, LLMs present an opportunity to alleviate some of the burden on human reviewers by providing reviews of submitted manuscripts. In a prospective survey study, 308 researchers from 110 institutions received GPT-4-generated feedback on their papers. Of these, " + }, + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "inline_equation", + "content": "57.4\\%" + }, + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "text", + "content": " found the feedback helpful, and " + }, + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "inline_equation", + "content": "82.4\\%" + }, + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "text", + "content": " felt it was more useful than the feedback provided by at least some human reviewers [12]. Building off of this work, [40] proposed a multi-agent review generation system that improved the specificity and helpfulness of feedback provided compared to GPT-4, reducing the rate of generic comments from " + }, + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "inline_equation", + "content": "60\\%" + }, + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "inline_equation", + "content": "29\\%" + }, + { + "bbox": [ + 67, + 223, + 542, + 306 + ], + "type": "text", + "content": "." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 306, + 541, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 306, + 541, + 390 + ], + "spans": [ + { + "bbox": [ + 67, + 306, + 541, + 390 + ], + "type": "text", + "content": "Furthermore, LLMs offer an efficient and possibly less biased alternative to human evaluations; [41] found that human evaluators of peer reviews were highly susceptible to bias from review length and paper score, as there were high levels of subjectivity among reviewers. These findings suggest that integrating LLMs into the review evaluation process could standardize assessments and reduce inconsistencies. As LLM-based tools continue to evolve, they hold the potential to improve both the speed and quality of manuscript evaluations. Our experiment is the first to demonstrate how LLMs can improve the peer review process on a large scale, highlighting their practical benefits." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 390, + 542, + 499 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 390, + 542, + 499 + ], + "spans": [ + { + "bbox": [ + 67, + 390, + 542, + 499 + ], + "type": "text", + "content": "However, despite these advancements, no prior studies had specifically examined how LLMs could be used to provide feedback on peer reviews in the areas we focused on in our experiment. A study released after our ICLR experiment, however, introduced a benchmark to identify toxicity in peer reviews [42]. The authors identified four categories of toxic comments: using emotive or sarcastic language, vague or overly critical feedback, personal attacks, and excessive negativity. These categories align closely with the ones we chose for our agent to provide feedback on. The authors benchmarked several LLMs for detecting toxicity and tested their ability to revise toxic sentences, finding that human evaluators preferred " + }, + { + "bbox": [ + 67, + 390, + 542, + 499 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 67, + 390, + 542, + 499 + ], + "type": "text", + "content": " of these revisions. In future iterations of our Review Feedback Agent, this benchmark could offer a valuable tool for testing our pipeline's ability to detect toxicity and offer constructive feedback." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 516, + 171, + 533 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 516, + 171, + 533 + ], + "spans": [ + { + "bbox": [ + 69, + 516, + 171, + 533 + ], + "type": "text", + "content": "5 Discussion" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 540, + 541, + 635 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 540, + 541, + 635 + ], + "spans": [ + { + "bbox": [ + 67, + 540, + 541, + 635 + ], + "type": "text", + "content": "Our research demonstrates the significant potential of LLM-based systems to enhance peer review quality at scale. By providing targeted feedback to reviewers at ICLR 2025, we observed meaningful improvements in review specificity, engagement, and actionability. We saw that " + }, + { + "bbox": [ + 67, + 540, + 541, + 635 + ], + "type": "inline_equation", + "content": "27\\%" + }, + { + "bbox": [ + 67, + 540, + 541, + 635 + ], + "type": "text", + "content": " of reviewers updated their reviews, and an overwhelming majority of those who made updates incorporated at least one piece of feedback into their modifications. 
Blinded AI researchers found the updated reviews to be consistently more clear and informative. Furthermore, feedback intervention led to increased engagement throughout the review process, with longer reviews, rebuttals, and reviewer responses, suggesting more involved discussions between authors and reviewers." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 636, + 542, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 636, + 542, + 721 + ], + "spans": [ + { + "bbox": [ + 67, + 636, + 542, + 721 + ], + "type": "text", + "content": "We designed the AI feedback system to enhance reviews while ensuring human reviewers retain complete control. First, the AI-generated feedback was purely optional, and reviewers could decide whether to incorporate it or not; by default, they could opt out by ignoring the feedback. Second, human reviewers had full control over the final review and the scores visible to the authors. To reduce the risk of hallucination, the AI feedback had to pass several rigorous reliability tests before being shared with reviewers. Finally, no personal or identifiable information about reviewers or authors was disclosed to the agent. An IRB review deemed the system to be low risk." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 312, + 751 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 312, + 751 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 312, + 751 + ], + "type": "text", + "content": "13" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 72, + 541, + 191 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 72, + 541, + 191 + ], + "spans": [ + { + "bbox": [ + 67, + 72, + 541, + 191 + ], + "type": "text", + "content": "Going forward, there are several directions to further improve the Review Feedback Agent. Our feedback categories focused on three main areas (improving specificity, addressing misunderstandings, and ensuring professionalism). While these categories were derived from reviewer guides and previous studies and encompass the majority of author complaints, they may not capture all aspects of review quality. Expanding to other categories would be helpful. Additionally, it would be interesting to explore the use of reasoning models to generate more nuanced feedback for complex issues in reviews. Finally, the concept of developing reliability tests for LLMs is an evolving field, with new studies emerging after our experiment [43, 44], and we hope to incorporate ideas from these recent works to improve the robustness of our framework. Ultimately, we expect that running this agent at future AI conferences across a diverse range of research topics will improve its robustness and effectiveness." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 192, + 541, + 301 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 192, + 541, + 301 + ], + "spans": [ + { + "bbox": [ + 67, + 192, + 541, + 301 + ], + "type": "text", + "content": "CS conferences have long leveraged machine learning to enhance their peer review processes. One early example is the Toronto Paper Matching algorithm, which was used in NIPS 2010 to match papers with reviewers and has since been deployed by over 50 conferences [45]. However, the impact of many of these earlier applications of machine learning has not been rigorously quantified. 
To address this gap, we were motivated to conduct this randomized controlled study to rigorously evaluate the effects of review feedback before broader deployment. Our findings show that by striving to make reviews more informative for authors, the Review Feedback Agent has the potential to enhance the overall quality of scientific communication. As LLM capabilities continue to advance, we anticipate even more advanced systems that can provide tailored feedback to reviewers, ultimately benefiting the entire scientific community through improved peer review." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 317, + 206, + 333 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 317, + 206, + 333 + ], + "spans": [ + { + "bbox": [ + 69, + 317, + 206, + 333 + ], + "type": "text", + "content": "Acknowledgements" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 342, + 541, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 342, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 67, + 342, + 541, + 392 + ], + "type": "text", + "content": "We would like to thank Celeste Martínez and Carlos Mondragon Chapa at OpenReview for their help in integrating our agent into the OpenReview interface. We would also like to thank Alex Tamkin and Anthropic for helping us increase our rate limits. Finally, we would like to thank members of the Zou group for their support and comments on this work." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 407, + 228, + 423 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 407, + 228, + 423 + ], + "spans": [ + { + "bbox": [ + 69, + 407, + 228, + 423 + ], + "type": "text", + "content": "Author Contributions" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 432, + 541, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 432, + 541, + 469 + ], + "spans": [ + { + "bbox": [ + 67, + 432, + 541, + 469 + ], + "type": "text", + "content": "NT, MY, JS, and JZ designed, developed, and deployed the Review Feedback Agent, conducted analyses, and wrote the paper. AG, NP, FS, RY, and CV are program chairs of ICLR 2025 and provided guidance on the feedback study and analysis." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 486, + 149, + 502 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 486, + 149, + 502 + ], + "spans": [ + { + "bbox": [ + 70, + 486, + 149, + 502 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 75, + 510, + 541, + 723 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 75, + 510, + 541, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 510, + 541, + 536 + ], + "spans": [ + { + "bbox": [ + 75, + 510, + 541, + 536 + ], + "type": "text", + "content": "[1] Bruce Alberts, Brooks Hanson, and Katrina L. Kelner. Editorial: Reviewing peer review. Science, 321(5885):15-15, 2008." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 75, + 542, + 541, + 567 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 75, + 542, + 541, + 567 + ], + "spans": [ + { + "bbox": [ + 75, + 542, + 541, + 567 + ], + "type": "text", + "content": "[2] Jacalyn Kelly, Tara Sadeghieh, and Khosrow Adeli. Peer review in scientific publications: benefits, critiques, & a survival guide. *Ejifcc*, 25(3):227, 2014." 
} ] } ], "index": 8 },
{ "bbox": [75, 573, 306, 586], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [75, 573, 306, 586], "spans": [ { "bbox": [75, 573, 306, 586], "type": "text", "content": "[3] Publons. Global state of peer review 2018, 2018." } ] } ], "index": 9 },
{ "bbox": [75, 592, 541, 616], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [75, 592, 541, 616], "spans": [ { "bbox": [75, 592, 541, 616], "type": "text", "content": "[4] Ariful Azad and Afeefa Banu. Publication trends in artificial intelligence conferences: The rise of super prolific authors, 2024." } ] } ], "index": 10 },
{ "bbox": [75, 624, 541, 659], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [75, 624, 541, 659], "spans": [ { "bbox": [75, 624, 541, 659], "type": "text", "content": "[5] Alison McCook. Is peer review broken? submissions are up, reviewers are overtaxed, and authors are lodging complaint after complaint about the process at top-tier journals. what's wrong with peer review?, 2006." } ] } ], "index": 11 },
{ "bbox": [75, 667, 249, 680], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [75, 667, 249, 680], "spans": [ { "bbox": [75, 667, 249, 680], "type": "text", "content": "[6] ICLR. Iclr 2024 press release, 2024." } ] } ], "index": 12 },
{ "bbox": [75, 686, 541, 723], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [75, 686, 541, 723], "spans": [ { "bbox": [75, 686, 541, 723], "type": "text", "content": "[7] Anna Rogers and Isabelle Augenstein. What can we do to improve peer review in NLP? In Trevor Cohn, Yulan He, and Yang Liu, editors, Findings of the Association for Computational Linguistics: EMNLP 2020, pages 1256–1262, Online, November 2020. Association for Computational Linguistics." } ] } ], "index": 13 } ], "sub_type": "ref_text" } ], "discarded_blocks": [ { "bbox": [299, 741, 312, 750], "type": "page_number", "angle": 0, "lines": [ { "bbox": [299, 741, 312, 750], "spans": [ { "bbox": [299, 741, 312, 750], "type": "text", "content": "14" } ] } ], "index": 15 } ], "page_size": [612, 792], "page_idx": 13 },
{ "para_blocks": [ { "bbox": [70, 71, 541, 693], "type": "list", "angle": 0, "index": 19, "blocks": [ { "bbox": [75, 71, 541, 109], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [75, 71, 541, 109], "spans": [ { "bbox": [75, 71, 541, 109], "type": "text", "content": "[8] Anna Rogers, Marzena Karpinska, Jordan Boyd-Graber, and Naoaki Okazaki. Program chairs' report on peer review at acl 2023. In Proceedings of the 61st Annual Meeting of the Association for Computational Linguistics (Volume 1: Long Papers), pages xl-lxxv, 2023." } ] } ], "index": 0 },
{ "bbox": [75, 115, 434, 129], "type": "ref_text", "angle": 0, "lines": [ { "bbox": [75, 115, 434, 129], "spans": [ { "bbox": [75, 115, 434, 129], "type": "text", "content": "[9] Martijn Arns. Open access is tiring out peer reviewers. Nature, 515:467, 2014."
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 71, + 135, + 540, + 161 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 135, + 540, + 161 + ], + "spans": [ + { + "bbox": [ + 71, + 135, + 540, + 161 + ], + "type": "text", + "content": "[10] Corinna Cortes and Neil D. Lawrence. Inconsistency in conference peer review: Revisiting the 2014 neurips experiment, 2021." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 167, + 250, + 180 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 167, + 250, + 180 + ], + "spans": [ + { + "bbox": [ + 70, + 167, + 250, + 180 + ], + "type": "text", + "content": "[11] Anthropic. Claude 3.5 sonnet, 2024." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 71, + 186, + 541, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 186, + 541, + 224 + ], + "spans": [ + { + "bbox": [ + 71, + 186, + 541, + 224 + ], + "type": "text", + "content": "[12] Weixin Liang, Yuhui Zhang, Hancheng Cao, Binglu Wang, Daisy Yi Ding, Xinyu Yang, Kailas Vodra-halli, Siyu He, Daniel Scott Smith, Yian Yin, et al. Can large language models provide useful feedback on research papers? a large-scale empirical analysis. NEJM AI, 1(8):AIoa2400196, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 71, + 231, + 539, + 256 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 231, + 539, + 256 + ], + "spans": [ + { + "bbox": [ + 71, + 231, + 539, + 256 + ], + "type": "text", + "content": "[13] Mert Yuksekgonul, Federico Bianchi, Joseph Boen, Sheng Liu, Zhi Huang, Carlos Guestrin, and James Zou. Textgrad: Automatic \"differentiation\" via text, 2024." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 71, + 262, + 539, + 312 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 262, + 539, + 312 + ], + "spans": [ + { + "bbox": [ + 71, + 262, + 539, + 312 + ], + "type": "text", + "content": "[14] Aman Madaan, Niket Tandon, Prakhar Gupta, Skyler Hallinan, Luyu Gao, Sarah Wiegreffe, Uri Alon, Nouha Dziri, Shrimai Prabhumoye, Yiming Yang, Shashank Gupta, Bodhisattwa Prasad Majumder, Katherine Hermann, Sean Welleck, Amir Yazdanbakhsh, and Peter Clark. Self-refine: Iterative refinement with self-feedback, 2023." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 71, + 319, + 539, + 355 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 319, + 539, + 355 + ], + "spans": [ + { + "bbox": [ + 71, + 319, + 539, + 355 + ], + "type": "text", + "content": "[15] Mohammad Hosseini and Serge P J M Horbach. Fighting reviewer fatigue or amplifying bias? considerations and recommendations for use of chatgpt and other large language models in scholarly peer review. Research Integrity and Peer Review, 2023." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 71, + 361, + 539, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 361, + 539, + 399 + ], + "spans": [ + { + "bbox": [ + 71, + 361, + 539, + 399 + ], + "type": "text", + "content": "[16] Weixin Liang, Zachary Izzo, Yaohui Zhang, Haley Lepp, Hancheng Cao, Xuandong Zhao, Lingjiao Chen, Haotian Ye, Sheng Liu, Zhi Huang, Daniel A. McFarland, and James Y. Zou. Monitoring ai-modified content at scale: A case study on the impact of chatgpt on ai conference peer reviews, 2024." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 71, + 406, + 539, + 443 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 406, + 539, + 443 + ], + "spans": [ + { + "bbox": [ + 71, + 406, + 539, + 443 + ], + "type": "text", + "content": "[17] Yue Zhang, Yafu Li, Leyang Cui, Deng Cai, Lemao Liu, Tingchen Fu, Xinting Huang, Enbo Zhao, Yu Zhang, Yulong Chen, et al. Siren's song in the ai ocean: a survey on hallucination in large language models. arXiv preprint arXiv:2309.01219, 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 71, + 450, + 539, + 486 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 450, + 539, + 486 + ], + "spans": [ + { + "bbox": [ + 71, + 450, + 539, + 486 + ], + "type": "text", + "content": "[18] Jeffrey Zhou, Tianjian Lu, Swaroop Mishra, Siddhartha Brahma, Sujoy Basu, Yi Luan, Denny Zhou, and Le Hou. Instruction-following evaluation for large language models. arXiv preprint arXiv:2311.07911, 2023." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 71, + 494, + 384, + 507 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 494, + 384, + 507 + ], + "spans": [ + { + "bbox": [ + 71, + 494, + 384, + 507 + ], + "type": "text", + "content": "[19] ICML 2023 program committee. Icml 2023 reviewer tutorial, 2023." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 71, + 514, + 509, + 528 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 514, + 509, + 528 + ], + "spans": [ + { + "bbox": [ + 71, + 514, + 509, + 528 + ], + "type": "text", + "content": "[20] ICML 2022 Program Chairs. How to be a good reviewer? reviewer tutorial for icml 2022, 2022." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 71, + 534, + 326, + 547 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 534, + 326, + 547 + ], + "spans": [ + { + "bbox": [ + 71, + 534, + 326, + 547 + ], + "type": "text", + "content": "[21] ACL PC Chairs. Last minute reviewing advice, 2017." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 71, + 553, + 539, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 553, + 539, + 578 + ], + "spans": [ + { + "bbox": [ + 71, + 553, + 539, + 578 + ], + "type": "text", + "content": "[22] Matias Valdenegro. Lxcv @ cvpr 2021 reviewer mentoring program: And how to write good reviews, 2021." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 71, + 586, + 373, + 599 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 586, + 373, + 599 + ], + "spans": [ + { + "bbox": [ + 71, + 586, + 373, + 599 + ], + "type": "text", + "content": "[23] Isabelle Augenstein Anna Rogers. Arr reviewer guidelines, 2021." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 71, + 605, + 539, + 630 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 605, + 539, + 630 + ], + "spans": [ + { + "bbox": [ + 71, + 605, + 539, + 630 + ], + "type": "text", + "content": "[24] Nyssa J Silbiger and Amber D Stubler. Unprofessional peer reviews disproportionately harm underrepresented groups in stem. PeerJ, 7:e8247, 2019." 
+ } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 71, + 637, + 539, + 663 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 637, + 539, + 663 + ], + "spans": [ + { + "bbox": [ + 71, + 637, + 539, + 663 + ], + "type": "text", + "content": "[25] Mathieu Fenniak, Matthew Stamy, pubpub zz, Martin Thoma, Matthew Peveler, exiledkingcc, and pypdf Contributors. The pypdf library, 2024." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 71, + 670, + 539, + 693 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 670, + 539, + 693 + ], + "spans": [ + { + "bbox": [ + 71, + 670, + 539, + 693 + ], + "type": "text", + "content": "[26] Marco Tulio Ribeiro and Scott Lundberg. Testing language models (and prompts) like we test software, 2023." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "15" + } + ] + } + ], + "index": 20 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 70, + 71, + 541, + 696 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 70, + 71, + 540, + 109 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 71, + 540, + 109 + ], + "spans": [ + { + "bbox": [ + 70, + 71, + 540, + 109 + ], + "type": "text", + "content": "[27] Alexander Goldberg, Ihsan Ullah, Thanh Gia Hieu Khuong, Benedictus Kent Rachmat, Zhen Xu, Isabelle Guyon, and Nihar B. Shah. Usefulness of llms as an author checklist assistant for scientific papers: Neurips'24 experiment, 2024." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 114, + 541, + 165 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 114, + 541, + 165 + ], + "spans": [ + { + "bbox": [ + 70, + 114, + 541, + 165 + ], + "type": "text", + "content": "[28] Alex Tamkin, Miles McCain, Kunal Handa, Esin Durmus, Liane Lovitt, Ankur Rathi, Saffron Huang, Alfred Mountfield, Jerry Hong, Stuart Ritchie, Michael Stern, Brian Clarke, Landon Goldberg, Theodore R. Sumers, Jared Mueller, William McEachen, Wes Mitchell, Shan Carter, Jack Clark, Jared Kaplan, and Deep Ganguli. Clio: Privacy-preserving insights into real-world ai use, 2024." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 170, + 539, + 197 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 170, + 539, + 197 + ], + "spans": [ + { + "bbox": [ + 70, + 170, + 539, + 197 + ], + "type": "text", + "content": "[29] Ryan Liu and Nihar B. Shah. Reviewergpt? an exploratory study on using large language models for paper reviewing, 2023." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 201, + 539, + 228 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 201, + 539, + 228 + ], + "spans": [ + { + "bbox": [ + 70, + 201, + 539, + 228 + ], + "type": "text", + "content": "[30] Som Biswas, Dushyant Dobaria, and Harris L. Cohen. Chatgpt and the future of journal reviews: A feasibility study. The Yale Journal of Biology and Medicine, 96(3):415-420, 2023." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 234, + 539, + 272 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 234, + 539, + 272 + ], + "spans": [ + { + "bbox": [ + 70, + 234, + 539, + 272 + ], + "type": "text", + "content": "[31] Weixin Liang, Yaohui Zhang, Zhengxuan Wu, Haley Lepp, Wenlong Ji, Xuandong Zhao, Hancheng Cao, Sheng Liu, Siyu He, Zhi Huang, Diyi Yang, Christopher Potts, Christopher D Manning, and James Y. Zou. Mapping the increasing use of llms in scientific papers, 2024." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 71, + 277, + 539, + 304 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 277, + 539, + 304 + ], + "spans": [ + { + "bbox": [ + 71, + 277, + 539, + 304 + ], + "type": "text", + "content": "[32] Nihar B. Shah. Challenges, experiments, and computational solutions in peer review. Commun. ACM, 65(6):76-87, May 2022." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 71, + 309, + 539, + 336 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 309, + 539, + 336 + ], + "spans": [ + { + "bbox": [ + 71, + 309, + 539, + 336 + ], + "type": "text", + "content": "[33] Simon Price and Peter A. Flach. Computational support for academic peer review: a perspective from artificial intelligence. *Commun. ACM*, 60(3):70-79, February 2017." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 70, + 342, + 541, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 342, + 541, + 367 + ], + "spans": [ + { + "bbox": [ + 70, + 342, + 541, + 367 + ], + "type": "text", + "content": "[34] Atreyi Kankanhalli. Peer review in the age of generative ai. Journal of the Association for Information Systems, 25(1), 2024." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 70, + 373, + 539, + 435 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 373, + 539, + 435 + ], + "spans": [ + { + "bbox": [ + 70, + 373, + 539, + 435 + ], + "type": "text", + "content": "[35] Ilia Kuznetsov, Osama Mohammed Afzal, Koen Dercksen, Nils Dycke, Alexander Goldberg, Tom Hope, Dirk Hovy, Jonathan K. Kummerfeld, Anne Lauscher, Kevin Leyton-Brown, Sheng Lu, Mausam, Margot Mieskes, Aurélie Néveol, Danish Pruthi, Lizhen Qu, Roy Schwartz, Noah A. Smith, Thamar Solorio, Jingyan Wang, Xiaodan Zhu, Anna Rogers, Nihar B. Shah, and Iryna Gurevych. What can natural language processing do for peer review?, 2024." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 70, + 441, + 539, + 479 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 441, + 539, + 479 + ], + "spans": [ + { + "bbox": [ + 70, + 441, + 539, + 479 + ], + "type": "text", + "content": "[36] Tiffany I Leung, Taiane de Azevedo Cardoso, Amaryllis Mavragani, and Gunther Eysenbach. Best practices for using ai tools as an author, peer reviewer, or editor. J Med Internet Res, 25:e51584, Aug 2023." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 71, + 485, + 539, + 511 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 485, + 539, + 511 + ], + "spans": [ + { + "bbox": [ + 71, + 485, + 539, + 511 + ], + "type": "text", + "content": "[37] Alessandro Checco, Lorenzo Bracciale, Pierpaolo Loreti, Stephen Pinfield, and Giuseppe Bianchi. AI-assisted peer review. Humanities and Social Sciences Communications, 2021." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 70, + 517, + 539, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 517, + 539, + 544 + ], + "spans": [ + { + "bbox": [ + 70, + 517, + 539, + 544 + ], + "type": "text", + "content": "[38] Kayvan Kousha and Mike Thelwall. Artificial intelligence to support publishing and peer review: A summary and review. Learned Publishing, 37(1):4-12, 2024." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 70, + 548, + 539, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 548, + 539, + 622 + ], + "spans": [ + { + "bbox": [ + 70, + 548, + 539, + 622 + ], + "type": "text", + "content": "[39] Xiaotian Su, Thiemo Wambsgangss, Roman Rietsche, Seyed Parsa Neshaei, and Tanja Käser. Reviewwriter: AI-generated instructions for peer review writing. In Ekaterina Kochmar, Jill Burstein, Andrea Horbach, Ronja Laarmann-Quante, Nitin Madnani, Anaïs Tack, Victoria Yaneva, Zheng Yuan, and Torsten Zesch, editors, Proceedings of the 18th Workshop on Innovative Use of NLP for Building Educational Applications (BEA 2023), pages 57–71, Toronto, Canada, July 2023. Association for Computational Linguistics." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 70, + 628, + 539, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 628, + 539, + 654 + ], + "spans": [ + { + "bbox": [ + 70, + 628, + 539, + 654 + ], + "type": "text", + "content": "[40] Mike D'Arcy, Tom Hope, Larry Birnbaum, and Doug Downey. Marg: Multi-agent review generation for scientific papers, 2024." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 70, + 660, + 539, + 696 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 660, + 539, + 696 + ], + "spans": [ + { + "bbox": [ + 70, + 660, + 539, + 696 + ], + "type": "text", + "content": "[41] Alexander Goldberg, Ivan Stelmakh, Kyunghyun Cho, Alice Oh, Alekh Agarwal, Danielle Belgrave, and Nihar B. Shah. Peer reviews of peer reviews: A randomized controlled trial and other experiments, 2024." + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "16" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 538, + 208 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 70, + 72, + 538, + 106 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 72, + 538, + 106 + ], + "spans": [ + { + "bbox": [ + 70, + 72, + 538, + 106 + ], + "type": "text", + "content": "[42] Man Luo, Bradley Peterson, Rafael Gan, Hari Ramalingame, Navya Gangrade, Ariadne Dimarogona, Imon Banerjee, and Phillip Howard. Benchmark on peer review toxic detection: A challenging task with a new dataset, 2025." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 70, + 114, + 538, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 114, + 538, + 148 + ], + "spans": [ + { + "bbox": [ + 70, + 114, + 538, + 148 + ], + "type": "text", + "content": "[43] Jon Saad-Falcon, Rajan Vivek, William Berrios, Nandita Shankar Naik, Matija Franklin, Bertie Vidgen, Amanpreet Singh, Douwe Kiela, and Shikib Mehri. 
Lmunit: Fine-grained evaluation with natural language unit tests, 2024." } ] } ], "index": 1 },
{ "bbox": [69, 156, 538, 178], "type": "text", "angle": 0, "lines": [ { "bbox": [69, 156, 538, 178], "spans": [ { "bbox": [69, 156, 538, 178], "type": "text", "content": "[44] Archiki Prasad, Elias Stengel-Eskin, Justin Chih-Yao Chen, Zaid Khan, and Mohit Bansal. Learning to generate unit tests for automated debugging, 2025." } ] } ], "index": 2 },
{ "bbox": [69, 185, 537, 208], "type": "text", "angle": 0, "lines": [ { "bbox": [69, 185, 537, 208], "spans": [ { "bbox": [69, 185, 537, 208], "type": "text", "content": "[45] Laurent Charlin, Richard S Zemel, and Craig Boutilier. A framework for optimizing paper matching. In UAI, volume 11, pages 86-95, 2011." } ] } ], "index": 3 } ], "sub_type": "text" },
{ "bbox": [70, 237, 189, 259], "type": "title", "angle": 0, "lines": [ { "bbox": [70, 237, 189, 259], "spans": [ { "bbox": [70, 237, 189, 259], "type": "text", "content": "Appendices" } ] } ], "index": 5 },
{ "bbox": [69, 273, 209, 289], "type": "title", "angle": 0, "lines": [ { "bbox": [69, 273, 209, 289], "spans": [ { "bbox": [69, 273, 209, 289], "type": "text", "content": "A Agent Prompts" } ] } ], "index": 6 },
{ "bbox": [69, 297, 539, 320], "type": "text", "angle": 0, "lines": [ { "bbox": [69, 297, 539, 320], "spans": [ { "bbox": [69, 297, 539, 320], "type": "text", "content": "We manually fine-tuned the following prompts for the LLMs in the Review Feedback Agent. We provide the prompts below:" } ] } ], "index": 7 },
{ "bbox": [85, 331, 149, 342], "type": "title", "angle": 0, "lines": [ { "bbox": [85, 331, 149, 342], "spans": [ { "bbox": [85, 331, 149, 342], "type": "text", "content": "Actor Prompt" } ] } ], "index": 8 },
{ "bbox": [84, 351, 523, 374], "type": "text", "angle": 0, "lines": [ { "bbox": [84, 351, 523, 374], "spans": [ { "bbox": [84, 351, 523, 374], "type": "text", "content": "Here is the paper: {paper}. Here is the peer review: {review}." } ] } ], "index": 9 },
{ "bbox": [85, 392, 183, 404], "type": "title", "angle": 0, "lines": [ { "bbox": [85, 392, 183, 404], "spans": [ { "bbox": [85, 392, 183, 404], "type": "text", "content": "Actor System Prompt" } ] } ], "index": 10 },
{ "bbox": [83, 413, 524, 460], "type": "text", "angle": 0, "lines": [ { "bbox": [83, 413, 524, 460], "spans": [ { "bbox": [83, 413, 524, 460], "type": "text", "content": "You are given a peer review of a machine learning paper submitted to a top-tier ML conference on OpenReview. Your task is to provide constructive feedback to the reviewer so that it becomes a high-quality review. You will do this by evaluating the review against a checklist and providing specific feedback about where the review fails."
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 85, + 462, + 237, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 462, + 237, + 473 + ], + "spans": [ + { + "bbox": [ + 85, + 462, + 237, + 473 + ], + "type": "text", + "content": "Here are step-by-step instructions:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 97, + 481, + 457, + 512 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 97, + 481, + 457, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 481, + 457, + 492 + ], + "spans": [ + { + "bbox": [ + 97, + 481, + 457, + 492 + ], + "type": "text", + "content": "1. Read the text of the review and the paper about which the review was written." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 97, + 501, + 282, + 512 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 501, + 282, + 512 + ], + "spans": [ + { + "bbox": [ + 97, + 501, + 282, + 512 + ], + "type": "text", + "content": "2. Evaluate every comment in the review:" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 122, + 521, + 523, + 709 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 122, + 521, + 523, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 521, + 523, + 544 + ], + "spans": [ + { + "bbox": [ + 122, + 521, + 523, + 544 + ], + "type": "text", + "content": "- Focus on comments related to weaknesses of the paper or questions the reviewer has. Ignore any comments that are summaries of the paper or that discuss strengths of the paper." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 122, + 548, + 523, + 571 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 548, + 523, + 571 + ], + "spans": [ + { + "bbox": [ + 122, + 548, + 523, + 571 + ], + "type": "text", + "content": "- Consider the reviewer's comments in their entirety. Make sure you read all sentences related to one thought, since the full context of the reviewer's comment is very important." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 122, + 576, + 523, + 599 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 576, + 523, + 599 + ], + "spans": [ + { + "bbox": [ + 122, + 576, + 523, + 599 + ], + "type": "text", + "content": "- Consider the reviewer's comments in their entirety. Make sure you read all sentences related to one thought, since the full context of the reviewer's comment is very important." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 122, + 604, + 523, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 604, + 523, + 639 + ], + "spans": [ + { + "bbox": [ + 122, + 604, + 523, + 639 + ], + "type": "text", + "content": "- For each comment, evaluate it against the following checklist. Follow the examples for how to respond. Importantly, you should be as helpful as possible. Do no ask superficial questions or make superficial remarks, think deeply and exhibit your understanding." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 122, + 643, + 523, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 643, + 523, + 666 + ], + "spans": [ + { + "bbox": [ + 122, + 643, + 523, + 666 + ], + "type": "text", + "content": "- Most reviewer comments are already sufficiently clear and actionable. Only focus on the ones that clearly fail the checklist items below." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 122, + 671, + 523, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 671, + 523, + 709 + ], + "spans": [ + { + "bbox": [ + 122, + 671, + 523, + 709 + ], + "type": "text", + "content": "- Checklist: \n(a) Check if the reviewer requests something obviously present in the paper. Only respond if certain of the reviewer's error. If so, politely pose a question to the reviewer with" + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "17" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "bbox": [ + 149, + 79, + 524, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 79, + 524, + 114 + ], + "spans": [ + { + "bbox": [ + 149, + 79, + 524, + 114 + ], + "type": "text", + "content": "something like \"Does the following answer your question...?\" quote the relevant paper section verbatim using tags. Use only exact quotes and do not comment if uncertain." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 149, + 114, + 524, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 114, + 524, + 139 + ], + "spans": [ + { + "bbox": [ + 149, + 114, + 524, + 139 + ], + "type": "text", + "content": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 156, + 141, + 219, + 152 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 141, + 219, + 152 + ], + "spans": [ + { + "bbox": [ + 156, + 141, + 219, + 152 + ], + "type": "text", + "content": "- Example 1:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 167, + 155, + 523, + 217 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 167, + 155, + 523, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 155, + 523, + 178 + ], + "spans": [ + { + "bbox": [ + 167, + 155, + 523, + 178 + ], + "type": "text", + "content": "* Reviewer comment: In Figure 4, the efficiency experiments have no results for Transformer models, which is a key limitation of the paper." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 167, + 181, + 523, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 181, + 523, + 217 + ], + "spans": [ + { + "bbox": [ + 167, + 181, + 523, + 217 + ], + "type": "text", + "content": "* Feedback to the reviewer: Does Figure 5 of the paper answer your question? In particular: In Transformers, the proposed technique provides " + }, + { + "bbox": [ + 167, + 181, + 523, + 217 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 167, + 181, + 523, + 217 + ], + "type": "text", + "content": " relative improvement in wall-clock time (Figure 5) ." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 219, + 219, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 219, + 219, + 230 + ], + "spans": [ + { + "bbox": [ + 156, + 219, + 219, + 230 + ], + "type": "text", + "content": "- Example 2:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 167, + 232, + 523, + 390 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 167, + 232, + 523, + 293 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 232, + 523, + 293 + ], + "spans": [ + { + "bbox": [ + 167, + 232, + 523, + 293 + ], + "type": "text", + "content": "* Reviewer comment: The authors propose a new deep learning model for predicting protein-protein interactions but don't explain how they address the class imbalance in PPI datasets. Most protein pairs don't interact, creating an imbalance between positive and negative samples. It's unclear how the model balances sensitivity and specificity, which is important for systems biology applications." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 167, + 294, + 523, + 390 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 294, + 523, + 390 + ], + "spans": [ + { + "bbox": [ + 167, + 294, + 523, + 390 + ], + "type": "text", + "content": "* Feedback to the reviewer: Does section 3.3 of the paper address your concern? Specifically, the following passage: To address the class imbalance in PPI datasets, where non-interacting pairs are far more common, we employ a \"Balanced Interaction Learning\" (BIL) approach. This involves using a focal loss function to reduce the influence of easy negatives, balanced minibatch sampling to ensure a mix of positive and negative samples, and a two-stage training process with pre-training on a balanced subset before fine-tuning on the full dataset ." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 392, + 219, + 403 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 392, + 219, + 403 + ], + "spans": [ + { + "bbox": [ + 156, + 392, + 219, + 403 + ], + "type": "text", + "content": "- Example 3:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 167, + 406, + 523, + 516 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 167, + 406, + 523, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 406, + 523, + 453 + ], + "spans": [ + { + "bbox": [ + 167, + 406, + 523, + 453 + ], + "type": "text", + "content": "* Reviewer comment: Lack of theoretical analysis of the communication complexity of the proposed method. In distributed optimization, communication complexity is crucial for minimizing inter-node communication to enhance system efficiency and reduce communication costs." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 167, + 456, + 523, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 456, + 523, + 516 + ], + "spans": [ + { + "bbox": [ + 167, + 456, + 523, + 516 + ], + "type": "text", + "content": "* Feedback to the reviewer: The paper appears to provide a theoretical analysis of communication complexity. 
Specifically, Theorem 3.6 states an " + }, + { + "bbox": [ + 167, + 456, + 523, + 516 + ], + "type": "inline_equation", + "content": "\\mathrm{O}(\\sqrt{\\kappa_{max}}\\log (1 / \\epsilon))" + }, + { + "bbox": [ + 167, + 456, + 523, + 516 + ], + "type": "text", + "content": " communication complexity bound. Does this address your concern? Are there specific aspects of communication complexity analysis you feel are missing?" + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 517, + 524, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 517, + 524, + 553 + ], + "spans": [ + { + "bbox": [ + 132, + 517, + 524, + 553 + ], + "type": "text", + "content": "(b) Look for any vague or unjustified claims in the review. This results in points that are not actionable or harder to respond to. For such cases, we would like to nudge the reviewer to provide more specific details and justify their claim." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 149, + 554, + 523, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 554, + 523, + 565 + ], + "spans": [ + { + "bbox": [ + 149, + 554, + 523, + 565 + ], + "type": "text", + "content": "First, let us define what it means for a comment to be actionable and specific enough." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 149, + 566, + 427, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 566, + 427, + 577 + ], + "spans": [ + { + "bbox": [ + 149, + 566, + 427, + 577 + ], + "type": "text", + "content": "There are a few pieces of criteria we will use to determine this:" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 151, + 579, + 523, + 655 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 156, + 579, + 523, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 579, + 523, + 602 + ], + "spans": [ + { + "bbox": [ + 156, + 579, + 523, + 602 + ], + "type": "text", + "content": "i. The review comment specifies the section, paragraph, figure, or table where the issue occurs." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 153, + 605, + 523, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 153, + 605, + 523, + 629 + ], + "spans": [ + { + "bbox": [ + 153, + 605, + 523, + 629 + ], + "type": "text", + "content": "ii. The issue or concern in the review comment is explicitly stated, avoiding vague language." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 151, + 632, + 523, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 632, + 523, + 655 + ], + "spans": [ + { + "bbox": [ + 151, + 632, + 523, + 655 + ], + "type": "text", + "content": "iii. The comment explains why the identified issue is problematic and needs addressing." + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 151, + 657, + 351, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 657, + 351, + 669 + ], + "spans": [ + { + "bbox": [ + 151, + 657, + 351, + 669 + ], + "type": "text", + "content": "iv. 
The reviewer provides concrete examples:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 162, + 671, + 523, + 709 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 162, + 671, + 454, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 671, + 454, + 683 + ], + "spans": [ + { + "bbox": [ + 162, + 671, + 454, + 683 + ], + "type": "text", + "content": "A. At least one example of what they find unclear or problematic." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 162, + 685, + 523, + 709 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 685, + 523, + 709 + ], + "spans": [ + { + "bbox": [ + 162, + 685, + 523, + 709 + ], + "type": "text", + "content": "B. At least one example or suggestion of what would address their concern (e.g., specific metrics, experiments, or changes)." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "18" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 149, + 79, + 525, + 114 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 79, + 525, + 114 + ], + "spans": [ + { + "bbox": [ + 149, + 79, + 525, + 114 + ], + "type": "text", + "content": "Do NOT nitpick. Most comments are already specific and actionable, and we do not want to provide feedback on those. We do NOT want to annoy reviewers with unnecessary feedback!" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 149, + 115, + 524, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 115, + 524, + 139 + ], + "spans": [ + { + "bbox": [ + 149, + 115, + 524, + 139 + ], + "type": "text", + "content": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 156, + 141, + 219, + 152 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 141, + 219, + 152 + ], + "spans": [ + { + "bbox": [ + 156, + 141, + 219, + 152 + ], + "type": "text", + "content": "- Example 1:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 167, + 155, + 524, + 228 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 167, + 155, + 523, + 178 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 155, + 523, + 178 + ], + "spans": [ + { + "bbox": [ + 167, + 155, + 523, + 178 + ], + "type": "text", + "content": "* Reviewer comment: It appears that the linear mode connectivity results may be somewhat brittle." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 167, + 181, + 524, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 181, + 524, + 228 + ], + "spans": [ + { + "bbox": [ + 167, + 181, + 524, + 228 + ], + "type": "text", + "content": "* Feedback to the reviewer: Can you elaborate on why you see the results as brittle? It may also be helpful to describe in further detail how the authors can address your concern. For example, if you believe additional experiments or theoretical analyses are needed, it may be helpful to explicitly say so." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 231, + 219, + 243 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 231, + 219, + 243 + ], + "spans": [ + { + "bbox": [ + 156, + 231, + 219, + 243 + ], + "type": "text", + "content": "- Example 2:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 167, + 245, + 524, + 305 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 167, + 245, + 523, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 245, + 523, + 268 + ], + "spans": [ + { + "bbox": [ + 167, + 245, + 523, + 268 + ], + "type": "text", + "content": "* Reviewer comment: The paper writing is not fluent enough and needs polishing to be easier to follow." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 167, + 270, + 524, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 270, + 524, + 305 + ], + "spans": [ + { + "bbox": [ + 167, + 270, + 524, + 305 + ], + "type": "text", + "content": "* Feedback to the reviewer: It would be helpful if you could provide specific examples of sections or sentences that are difficult to follow. This would give the authors more actionable feedback." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 308, + 219, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 308, + 219, + 320 + ], + "spans": [ + { + "bbox": [ + 156, + 308, + 219, + 320 + ], + "type": "text", + "content": "- Example 3:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 167, + 323, + 524, + 431 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 167, + 323, + 523, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 323, + 523, + 357 + ], + "spans": [ + { + "bbox": [ + 167, + 323, + 523, + 357 + ], + "type": "text", + "content": "* Reviewer comment: In the proposed method, an additional optimization problem is required to solve every iteration, i.e., Eq. (11). Thus the proposed method seems inefficient since it is a nested-loop algorithm." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 167, + 360, + 524, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 360, + 524, + 431 + ], + "spans": [ + { + "bbox": [ + 167, + 360, + 524, + 431 + ], + "type": "text", + "content": "* Feedback to the reviewer: Your concern about efficiency is valid, but it may be helpful to describe in further detail how the authors might address your concern. For example, you could ask about the computational complexity of solving Eq. (11) compared to the overall algorithm, or request empirical runtime comparisons to existing methods. This could help the authors address the efficiency concern more concretely." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 434, + 219, + 445 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 434, + 219, + 445 + ], + "spans": [ + { + "bbox": [ + 156, + 434, + 219, + 445 + ], + "type": "text", + "content": "- Example 4:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 167, + 448, + 524, + 556 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 167, + 448, + 523, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 448, + 523, + 495 + ], + "spans": [ + { + "bbox": [ + 167, + 448, + 523, + 495 + ], + "type": "text", + "content": "* Reviewer comment: The paper presents a limited number of baseline methods, and they are relatively outdated (between 2019 and 2021). Additionally, the paper lacks analytical experiments to substantiate that the proposed method has learned superior textual structural information." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 167, + 498, + 524, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 498, + 524, + 556 + ], + "spans": [ + { + "bbox": [ + 167, + 498, + 524, + 556 + ], + "type": "text", + "content": "* Feedback to the reviewer: To strengthen this critique, consider suggesting specific, more recent baselines that you believe should be included. Also, providing examples of analytical experiments that could effectively demonstrate superior learning of textual structural information would make this feedback more actionable for the authors." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 559, + 219, + 571 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 559, + 219, + 571 + ], + "spans": [ + { + "bbox": [ + 156, + 559, + 219, + 571 + ], + "type": "text", + "content": "- Example 5:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 167, + 573, + 523, + 695 + ], + "type": "list", + "angle": 0, + "index": 21, + "blocks": [ + { + "bbox": [ + 167, + 573, + 523, + 644 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 573, + 523, + 644 + ], + "spans": [ + { + "bbox": [ + 167, + 573, + 523, + 644 + ], + "type": "text", + "content": "* Reviewer comment: One of the assumptions of this paper is that \"most GNNs perform better on homophilic graphs\". I personally do not agree with it. A part of the heterophilic graphs are easy to fit, e.g., Wisconsin with " + }, + { + "bbox": [ + 167, + 573, + 523, + 644 + ], + "type": "inline_equation", + "content": "90 + \\%" + }, + { + "bbox": [ + 167, + 573, + 523, + 644 + ], + "type": "text", + "content": " accuracy, and some homophilic graphs are challenging. The difficulties of node classification on different datasets are not only related to the graph (label) homophily, but also related to the node features, and many other factors." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 167, + 647, + 523, + 695 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 647, + 523, + 695 + ], + "spans": [ + { + "bbox": [ + 167, + 647, + 523, + 695 + ], + "type": "text", + "content": "* Feedback to the reviewer: Your point is helpful, but it would be more actionable to ask the authors to provide evidence supporting their assumption, rather than simply disagreeing. Consider asking for specific examples or citations that demonstrate GNNs performing better on homophilic graphs." 
+ } + ] + } + ], + "index": 20 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 697, + 219, + 709 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 697, + 219, + 709 + ], + "spans": [ + { + "bbox": [ + 156, + 697, + 219, + 709 + ], + "type": "text", + "content": "- Example 6:" + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "19" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "bbox": [ + 167, + 79, + 524, + 141 + ], + "type": "list", + "angle": 0, + "index": 2, + "blocks": [ + { + "bbox": [ + 167, + 79, + 466, + 90 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 79, + 466, + 90 + ], + "spans": [ + { + "bbox": [ + 167, + 79, + 466, + 90 + ], + "type": "text", + "content": "* Reviewer comment: The numbers in table 1 are not described." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 167, + 93, + 524, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 93, + 524, + 141 + ], + "spans": [ + { + "bbox": [ + 167, + 93, + 524, + 141 + ], + "type": "text", + "content": "* Feedback to the reviewer: It would be helpful to specify what aspects of the numbers in Table 1 need more description. Are you referring to the meaning of the values, their units, or something else? This would help the authors provide a more targeted response." + } + ] + } + ], + "index": 1 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 149, + 143, + 523, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 143, + 523, + 167 + ], + "spans": [ + { + "bbox": [ + 149, + 143, + 523, + 167 + ], + "type": "text", + "content": "The following are examples where the reviewer's comments are already specific and, most importantly, actionable, so you should not give any feedback:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 156, + 169, + 524, + 502 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 156, + 169, + 524, + 241 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 169, + 524, + 241 + ], + "spans": [ + { + "bbox": [ + 156, + 169, + 524, + 241 + ], + "type": "text", + "content": "- Reviewer comment: The paper claims occupancy is increased on Page 6 but it was unclear: (i) what definition of occupancy is being used (GPU resources could mean many things and occupancy often just refers to number of warps that can concurrently run versus max number supported by hardware); and (ii) whether any measurement has been made to confirm the claimed improvement (e.g., using NVIDIA Parallel Nsight or similar approaches for collecting performance counters)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 156, + 243, + 524, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 243, + 524, + 303 + ], + "spans": [ + { + "bbox": [ + 156, + 243, + 524, + 303 + ], + "type": "text", + "content": "- Reviewer comment: Second paragraph under \"Semantic similarity\": I felt lots of details were missing here to better understand the quality of phrases, and the feasibility of the proposed approach. The Appendix A do not provide all necessary details. Is this done on the pretraining corpus? 
What trivial constituents were dropped out and why (some examples would help)?" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 156, + 304, + 524, + 341 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 304, + 524, + 341 + ], + "spans": [ + { + "bbox": [ + 156, + 304, + 524, + 341 + ], + "type": "text", + "content": "- Reviewer comment: Some works like Saycan and RT2 also consider the match of the environment and the agent ability. Key differences between the proposed method and those existing works need to be more carefully discussed." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 156, + 342, + 524, + 378 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 342, + 524, + 378 + ], + "spans": [ + { + "bbox": [ + 156, + 342, + 524, + 378 + ], + "type": "text", + "content": "- Reviewer comment: The problem studied, and the techniques used, are closely related to Lipshitz bandits [2], pricing [3] and bilateral trade [1]. Please consider a more thorough comparison with the already known results and techniques there." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 156, + 380, + 524, + 441 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 380, + 524, + 441 + ], + "spans": [ + { + "bbox": [ + 156, + 380, + 524, + 441 + ], + "type": "text", + "content": "- Reviewer comment: In Table 3, FlashFFTConv outperforms torch.fft by up to " + }, + { + "bbox": [ + 156, + 380, + 524, + 441 + ], + "type": "inline_equation", + "content": "8.7\\mathrm{x}" + }, + { + "bbox": [ + 156, + 380, + 524, + 441 + ], + "type": "text", + "content": ", while the speedup is about " + }, + { + "bbox": [ + 156, + 380, + 524, + 441 + ], + "type": "inline_equation", + "content": "2\\mathrm{x}" + }, + { + "bbox": [ + 156, + 380, + 524, + 441 + ], + "type": "text", + "content": " without the domain-specific optimizations. Does it mean the major speedup comes from the domain-specific optimizations instead of the FlashFFTConv algorithm? Could the authors conduct this ablation study (with and without the domain-specific optimizations) in other experiments?" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 156, + 441, + 524, + 502 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 441, + 524, + 502 + ], + "spans": [ + { + "bbox": [ + 156, + 441, + 524, + 502 + ], + "type": "text", + "content": "- Reviewer comment: Then in Section 4.2, the authors propose to give the actor past actions to help it infer the state at the current step. I don't understand why is this not done by default. In my understanding, DOMDPs are POMDPs and in POMDPs, past actions and observations should always be given to the policy for optimal control. I don't see how this is an innovation." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 149, + 504, + 524, + 563 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 504, + 524, + 563 + ], + "spans": [ + { + "bbox": [ + 149, + 504, + 524, + 563 + ], + "type": "text", + "content": "If a reviewer asks a question that is already clear, you do not need to give feedback on it or rephrase it. Questions need to be clear and specific, but they do not necessarily need to be actionable as they represent a reviewer's confusion. To be precise, in most cases if a comment ends in '?' you should ONLY give feedback if the question itself is unclear." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 149, + 563, + 524, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 563, + 524, + 586 + ], + "spans": [ + { + "bbox": [ + 149, + 563, + 524, + 586 + ], + "type": "text", + "content": "Here are some examples of reviewer comments that are clear and specific, and therefore do not need feedback:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 156, + 590, + 524, + 675 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 156, + 590, + 524, + 637 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 590, + 524, + 637 + ], + "spans": [ + { + "bbox": [ + 156, + 590, + 524, + 637 + ], + "type": "text", + "content": "- Reviewer comment: 4) In Figure 6, Spearman rank correlation scores for HCMs are reported. As far as I know, Spearman rank correlation calculates the correlation between two variables. How was the correlation computed from multiple runs in this case?" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 156, + 639, + 524, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 639, + 524, + 675 + ], + "spans": [ + { + "bbox": [ + 156, + 639, + 524, + 675 + ], + "type": "text", + "content": "- Reviewer comment: While there are detailed information about training procedure, not much is written about the actual inference step. For instance, how many samples for each prototype are required for reliable performance?" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 133, + 677, + 524, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 133, + 677, + 524, + 700 + ], + "spans": [ + { + "bbox": [ + 133, + 677, + 524, + 700 + ], + "type": "text", + "content": "(c) If the reviewer claims the paper lacks novelty, ensure they specify why, including references to similar work. If they haven't, we would like to nudge the reviewer to" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "20" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 148, + 79, + 524, + 102 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 79, + 524, + 102 + ], + "spans": [ + { + "bbox": [ + 148, + 79, + 524, + 102 + ], + "type": "text", + "content": " justify the claim, by prompting them to provide the most relevant references, the relationships, and specifying similarities or differences." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 148, + 102, + 524, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 148, + 102, + 524, + 126 + ], + "spans": [ + { + "bbox": [ + 148, + 102, + 524, + 126 + ], + "type": "text", + "content": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 156, + 129, + 219, + 140 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 129, + 219, + 140 + ], + "spans": [ + { + "bbox": [ + 156, + 129, + 219, + 140 + ], + "type": "text", + "content": "- Example 1:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 167, + 143, + 524, + 228 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 167, + 143, + 523, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 143, + 523, + 166 + ], + "spans": [ + { + "bbox": [ + 167, + 143, + 523, + 166 + ], + "type": "text", + "content": "* Reviewer comment: The paper's novelty is limited considering the ICLR standards." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 167, + 169, + 524, + 228 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 169, + 524, + 228 + ], + "spans": [ + { + "bbox": [ + 167, + 169, + 524, + 228 + ], + "type": "text", + "content": "* Feedback to the reviewer: It would be really helpful to the authors if you consider discussing the reasons for why the novelty is limited, and specify what ICLR standards are in this context. In particular, it would be very helpful if you give examples of the closest papers, their similarities, and differences with the methods or results in the current paper." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 231, + 219, + 242 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 231, + 219, + 242 + ], + "spans": [ + { + "bbox": [ + 156, + 231, + 219, + 242 + ], + "type": "text", + "content": "- Example 2:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 167, + 244, + 524, + 330 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 167, + 244, + 523, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 244, + 523, + 268 + ], + "spans": [ + { + "bbox": [ + 167, + 244, + 523, + 268 + ], + "type": "text", + "content": "* Reviewer comment: The novelty of this work is not clear from the conclusion and experiments now." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 167, + 270, + 524, + 330 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 270, + 524, + 330 + ], + "spans": [ + { + "bbox": [ + 167, + 270, + 524, + 330 + ], + "type": "text", + "content": "* Feedback to the reviewer: To make this feedback more actionable, it would be helpful to specify which aspects of novelty are unclear or missing. Are there particular claims or contributions that need more justification? Providing concrete suggestions for how the authors could better highlight the novelty would give them clearer guidance." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 332, + 219, + 344 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 332, + 219, + 344 + ], + "spans": [ + { + "bbox": [ + 156, + 332, + 219, + 344 + ], + "type": "text", + "content": "- Example 3:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 167, + 346, + 524, + 456 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 167, + 346, + 523, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 346, + 523, + 369 + ], + "spans": [ + { + "bbox": [ + 167, + 346, + 523, + 369 + ], + "type": "text", + "content": "* Reviewer comment: The proposed method is not innovative enough. I'm not an expert in this field, so I'm not sure about it." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 167, + 372, + 524, + 456 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 372, + 524, + 456 + ], + "spans": [ + { + "bbox": [ + 167, + 372, + 524, + 456 + ], + "type": "text", + "content": "* Feedback to the reviewer: It would be helpful if you could elaborate on why you think the method may not be innovative enough, even if you're not an expert. Are there specific aspects that seem similar to existing work? If you're uncertain about the novelty, it's best to phrase this as a question or area for clarification rather than a definitive weakness. For example, you could ask the authors to further explain how their approach differs from or improves upon existing methods for training vision-language models for satellite imagery." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 149, + 457, + 523, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 457, + 523, + 481 + ], + "spans": [ + { + "bbox": [ + 149, + 457, + 523, + 481 + ], + "type": "text", + "content": "The following are examples where the reviewer's discussion of novelty is already detailed and actionable as written, so you should not give any feedback:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 156, + 483, + 524, + 640 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 156, + 483, + 524, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 483, + 524, + 580 + ], + "spans": [ + { + "bbox": [ + 156, + 483, + 524, + 580 + ], + "type": "text", + "content": "- Reviewer comment: DASHA is a mash-up between MARINA and existing distributed nonconvex optimization methods. Other than the fact that three variants of DASHA get rid of the uncompressed synchronization in MARINA, this reviewer could not pinpoint a difference between MARINA and DASHA. As such, the main novelty of this work seems to be in terms of theoretical analysis of MARINA when the uncompressed synchronization step is removed. 
The authors could have done a better job of clarifying where does this novelty lie in the analysis (e.g., pinpointing the key analytical approaches in the lemma that helped improve the analysis)" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 156, + 582, + 524, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 582, + 524, + 640 + ], + "spans": [ + { + "bbox": [ + 156, + 582, + 524, + 640 + ], + "type": "text", + "content": "- Reviewer comment: I'm not sure the paper has sufficient novelty to be published in the top-tier conference since the proposed method only goes one step further from Task Arithmetic [1] and TIES-MERGING [2] by incorporating trainable weights for task vectors. The concept seems thin to support an entire paper, with only one page (page 6) dedicated to the novel part." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 132, + 643, + 524, + 703 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 643, + 524, + 703 + ], + "spans": [ + { + "bbox": [ + 132, + 643, + 524, + 703 + ], + "type": "text", + "content": "(d) Identify any personal attacks or inappropriate remarks made by the reviewer. This can be about the personality, the knowledge, or the experience of the authors. For example, they call the work \"incompetent\" without justifying why. For this case, we would like to kindly warn the reviewer about their comment and politely suggest they revise their language." + } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 310, + 750 + ], + "type": "text", + "content": "21" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 149, + 79, + 524, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 149, + 79, + 524, + 103 + ], + "spans": [ + { + "bbox": [ + 149, + 79, + 524, + 103 + ], + "type": "text", + "content": "The following are examples of reviewer comments that fail this checklist item and useful feedback provided to the reviewer's comment:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 156, + 106, + 219, + 117 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 106, + 219, + 117 + ], + "spans": [ + { + "bbox": [ + 156, + 106, + 219, + 117 + ], + "type": "text", + "content": "- Example 1:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 167, + 118, + 524, + 168 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 167, + 118, + 524, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 118, + 524, + 143 + ], + "spans": [ + { + "bbox": [ + 167, + 118, + 524, + 143 + ], + "type": "text", + "content": "* Reviewer comment: The authors clearly do not live in the real world and do not care about people or downstream effects of their research." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 167, + 144, + 524, + 168 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 144, + 524, + 168 + ], + "spans": [ + { + "bbox": [ + 167, + 144, + 524, + 168 + ], + "type": "text", + "content": "* Feedback to the reviewer: We kindly suggest you revise this comment, as it includes remarks about the personalities or intents of the authors." 
+ } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 171, + 219, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 171, + 219, + 182 + ], + "spans": [ + { + "bbox": [ + 156, + 171, + 219, + 182 + ], + "type": "text", + "content": "- Example 2:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 167, + 185, + 524, + 246 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 167, + 185, + 524, + 207 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 185, + 524, + 207 + ], + "spans": [ + { + "bbox": [ + 167, + 185, + 524, + 207 + ], + "type": "text", + "content": "* Reviewer comment: This paper is embarrassing, and you are clearly not fit to be in research." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 167, + 210, + 524, + 246 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 210, + 524, + 246 + ], + "spans": [ + { + "bbox": [ + 167, + 210, + 524, + 246 + ], + "type": "text", + "content": "* Feedback to the reviewer: We appreciate your review, but kindly request that you focus your comments on the specific content and methodology of the paper rather than making personal remarks about the authors." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 156, + 249, + 219, + 260 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 156, + 249, + 219, + 260 + ], + "spans": [ + { + "bbox": [ + 156, + 249, + 219, + 260 + ], + "type": "text", + "content": "- Example 3:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 167, + 262, + 524, + 371 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 167, + 262, + 524, + 322 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 262, + 524, + 322 + ], + "spans": [ + { + "bbox": [ + 167, + 262, + 524, + 322 + ], + "type": "text", + "content": "* Reviewer comment: This MC-IS method for estimating the score will NEVER work well in high dimensions due to variance and thus why works such as [1,2,3,4] which are clearly aware of this formulation (as they either state it in their appendices or use it for subsequent calculation) pursue an optimization alternative to estimating the drift." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 167, + 324, + 524, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 167, + 324, + 524, + 371 + ], + "spans": [ + { + "bbox": [ + 167, + 324, + 524, + 371 + ], + "type": "text", + "content": "* Feedback to the reviewer: Consider revising this comment to avoid absolute statements like \"NEVER\". Instead, you could phrase it as a concern about scalability to high dimensions, and ask the authors to address this limitation or provide evidence that it can work in higher dimensions." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 97, + 380, + 189, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 380, + 189, + 392 + ], + "spans": [ + { + "bbox": [ + 97, + 380, + 189, + 392 + ], + "type": "text", + "content": "3. 
Provide feedback:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 121, + 400, + 524, + 423 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 400, + 524, + 423 + ], + "spans": [ + { + "bbox": [ + 121, + 400, + 524, + 423 + ], + "type": "text", + "content": "- For each comment that fails according to the checklist, write concise feedback in the following format:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 138, + 428, + 348, + 453 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 138, + 428, + 348, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 428, + 348, + 438 + ], + "spans": [ + { + "bbox": [ + 138, + 428, + 348, + 438 + ], + "type": "text", + "content": "- Comment: the verbatim comment of interest" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 138, + 442, + 294, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 442, + 294, + 453 + ], + "spans": [ + { + "bbox": [ + 138, + 442, + 294, + 453 + ], + "type": "text", + "content": "- Feedback: your concise feedback" + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 457, + 520, + 486 + ], + "type": "list", + "angle": 0, + "index": 20, + "blocks": [ + { + "bbox": [ + 121, + 457, + 520, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 457, + 520, + 469 + ], + "spans": [ + { + "bbox": [ + 121, + 457, + 520, + 469 + ], + "type": "text", + "content": "- If you do not identify any issues with a comment, do not include it in your feedback list." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 121, + 474, + 504, + 486 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 474, + 504, + 486 + ], + "spans": [ + { + "bbox": [ + 121, + 474, + 504, + 486 + ], + "type": "text", + "content": "- If you find no issues in the review at all, respond with: 'Thanks for your hard work!'" + } + ] + } + ], + "index": 19 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 494, + 137, + 504 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 494, + 137, + 504 + ], + "spans": [ + { + "bbox": [ + 85, + 494, + 137, + 504 + ], + "type": "text", + "content": "Remember:" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 99, + 514, + 524, + 693 + ], + "type": "list", + "angle": 0, + "index": 29, + "blocks": [ + { + "bbox": [ + 99, + 514, + 416, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 514, + 416, + 525 + ], + "spans": [ + { + "bbox": [ + 99, + 514, + 416, + 525 + ], + "type": "text", + "content": "- Be concise, limiting your feedback for each comment to 1-2 sentences." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 99, + 533, + 479, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 533, + 479, + 545 + ], + "spans": [ + { + "bbox": [ + 99, + 533, + 479, + 545 + ], + "type": "text", + "content": "- Do not summarize your feedback at the end or include a preamble at the beginning." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 99, + 553, + 524, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 553, + 524, + 577 + ], + "spans": [ + { + "bbox": [ + 99, + 553, + 524, + 577 + ], + "type": "text", + "content": "- Do not repeat anything the reviewer already included in their review, and do not praise anything the reviewer wrote as we want to provide constructive feedback." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 99, + 585, + 524, + 609 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 585, + 524, + 609 + ], + "spans": [ + { + "bbox": [ + 99, + 585, + 524, + 609 + ], + "type": "text", + "content": "- Your feedback will be sent to reviewers. Do not mention that you are using a checklist or guidelines." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 99, + 617, + 524, + 640 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 617, + 524, + 640 + ], + "spans": [ + { + "bbox": [ + 99, + 617, + 524, + 640 + ], + "type": "text", + "content": "- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 99, + 649, + 524, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 649, + 524, + 673 + ], + "spans": [ + { + "bbox": [ + 99, + 649, + 524, + 673 + ], + "type": "text", + "content": "- Do not provide feedback to any comments that mention a score or rating. You do not care about the reviewer's score or rating for this paper." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 99, + 681, + 380, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 681, + 380, + 693 + ], + "spans": [ + { + "bbox": [ + 99, + 681, + 380, + 693 + ], + "type": "text", + "content": "- Do not provide feedback to any comments that discuss typos." + } + ] + } + ], + "index": 28 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "22" + } + ] + } + ], + "index": 30 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 86, + 74, + 173, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 74, + 173, + 85 + ], + "spans": [ + { + "bbox": [ + 86, + 74, + 173, + 85 + ], + "type": "text", + "content": "Aggregator Prompt" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 85, + 95, + 290, + 107 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 95, + 290, + 107 + ], + "spans": [ + { + "bbox": [ + 85, + 95, + 290, + 107 + ], + "type": "text", + "content": "Here is the paper: {paper} ." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 85, + 108, + 446, + 119 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 108, + 446, + 119 + ], + "spans": [ + { + "bbox": [ + 85, + 108, + 446, + 119 + ], + "type": "text", + "content": "Here are the lists of feedback: {feedbacks} ." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 85, + 120, + 334, + 132 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 120, + 334, + 132 + ], + "spans": [ + { + "bbox": [ + 85, + 120, + 334, + 132 + ], + "type": "text", + "content": "Here is the peer review: {review} ." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 86, + 149, + 207, + 161 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 149, + 207, + 161 + ], + "spans": [ + { + "bbox": [ + 86, + 149, + 207, + 161 + ], + "type": "text", + "content": "Aggregator System Prompt" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 85, + 171, + 525, + 206 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 171, + 525, + 206 + ], + "spans": [ + { + "bbox": [ + 85, + 171, + 525, + 206 + ], + "type": "text", + "content": "You will be given multiple lists of feedback about a peer review of a machine learning paper submitted to a top-tier ML conference. The aim of the feedback is to guide a reviewer to make the review high-quality. Your task is to aggregate the lists of feedback into one list." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 85, + 218, + 524, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 218, + 524, + 242 + ], + "spans": [ + { + "bbox": [ + 85, + 218, + 524, + 242 + ], + "type": "text", + "content": "Here are the guidelines that were followed to generate the feedback lists originally: {ACTOR_SYSTEM_CHART} " + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 85, + 243, + 237, + 254 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 243, + 237, + 254 + ], + "spans": [ + { + "bbox": [ + 85, + 243, + 237, + 254 + ], + "type": "text", + "content": "Here are step-by-step instructions:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 97, + 262, + 523, + 316 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 97, + 262, + 523, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 262, + 523, + 285 + ], + "spans": [ + { + "bbox": [ + 97, + 262, + 523, + 285 + ], + "type": "text", + "content": "1. Read the multiple feedback lists provided for that review, the text of the review, and the paper about which the review was written." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 97, + 294, + 523, + 316 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 294, + 523, + 316 + ], + "spans": [ + { + "bbox": [ + 97, + 294, + 523, + 316 + ], + "type": "text", + "content": "2. For all feedback lists, aggregate them into one list with the best comment-feedback pairs from each list:" + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 122, + 326, + 524, + 434 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 122, + 326, + 523, + 350 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 326, + 523, + 350 + ], + "spans": [ + { + "bbox": [ + 122, + 326, + 523, + 350 + ], + "type": "text", + "content": "- For each comment-feedback pair in the multiple lists that are similar, determine which provides the best feedback and keep only that pair." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 122, + 354, + 524, + 389 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 354, + 524, + 389 + ], + "spans": [ + { + "bbox": [ + 122, + 354, + 524, + 389 + ], + "type": "text", + "content": "- If there are unique comment-feedback pairs in the multiple lists, critically determine if it is an essential piece of feedback needed to improve the review. If it is unnecessary or redundant, remove the comment-feedback pair." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 122, + 393, + 523, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 393, + 523, + 418 + ], + "spans": [ + { + "bbox": [ + 122, + 393, + 523, + 418 + ], + "type": "text", + "content": "- You should end up with one feedback list that has no repeated comments from the review and that is high quality." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 122, + 422, + 523, + 434 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 422, + 523, + 434 + ], + "spans": [ + { + "bbox": [ + 122, + 422, + 523, + 434 + ], + "type": "text", + "content": "- Return the feedback list in the format you received it in, where the pairs are formatted as:" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 139, + 437, + 375, + 464 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 139, + 437, + 375, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 139, + 437, + 375, + 449 + ], + "spans": [ + { + "bbox": [ + 139, + 437, + 375, + 449 + ], + "type": "text", + "content": "- Comment: {{the verbatim comment of interest}}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 140, + 451, + 320, + 464 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 451, + 320, + 464 + ], + "spans": [ + { + "bbox": [ + 140, + 451, + 320, + 464 + ], + "type": "text", + "content": "- Feedback: {{your concise feedback}}" + } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 86, + 481, + 149, + 492 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 481, + 149, + 492 + ], + "spans": [ + { + "bbox": [ + 86, + 481, + 149, + 492 + ], + "type": "text", + "content": "Critic Prompt" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 85, + 502, + 290, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 502, + 290, + 514 + ], + "spans": [ + { + "bbox": [ + 85, + 502, + 290, + 514 + ], + "type": "text", + "content": "Here is the paper: {paper} ." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 85, + 515, + 353, + 527 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 515, + 353, + 527 + ], + "spans": [ + { + "bbox": [ + 85, + 515, + 353, + 527 + ], + "type": "text", + "content": "Here is the feedback: {feedback} ." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 85, + 528, + 334, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 528, + 334, + 539 + ], + "spans": [ + { + "bbox": [ + 85, + 528, + 334, + 539 + ], + "type": "text", + "content": "Here is the peer review: {review} ." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 85, + 551, + 136, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 551, + 136, + 561 + ], + "spans": [ + { + "bbox": [ + 85, + 551, + 136, + 561 + ], + "type": "text", + "content": "Remember:" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 100, + 571, + 523, + 625 + ], + "type": "list", + "angle": 0, + "index": 26, + "blocks": [ + { + "bbox": [ + 100, + 571, + 523, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 571, + 523, + 594 + ], + "spans": [ + { + "bbox": [ + 100, + 571, + 523, + 594 + ], + "type": "text", + "content": "- You are a critic that will help reviewers improve their comments and reviews. Your valuable feedback will help improve their review." 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 100, + 602, + 523, + 625 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 100, + 602, + 523, + 625 + ], + "spans": [ + { + "bbox": [ + 100, + 602, + 523, + 625 + ], + "type": "text", + "content": "- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer." + } + ] + } + ], + "index": 25 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 86, + 642, + 184, + 654 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 86, + 642, + 184, + 654 + ], + "spans": [ + { + "bbox": [ + 86, + 642, + 184, + 654 + ], + "type": "text", + "content": "Critic System Prompt" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 85, + 662, + 525, + 700 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 662, + 525, + 700 + ], + "spans": [ + { + "bbox": [ + 85, + 662, + 525, + 700 + ], + "type": "text", + "content": "You are a critic that will help reviewers improve their reviews. You are given a list of feedback to the reviewer comments of a machine learning paper submitted to a top-tier ML conference on OpenReview. The aim of the feedback is to guide a reviewer to improve their comments and re" + } + ] + } + ], + "index": 28 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "23" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 85, + 79, + 523, + 91 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 79, + 523, + 91 + ], + "spans": [ + { + "bbox": [ + 85, + 79, + 523, + 91 + ], + "type": "text", + "content": "view as a whole. Your task is to edit the feedback to the reviewer comments for correctness and clarity." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 84, + 102, + 524, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 102, + 524, + 126 + ], + "spans": [ + { + "bbox": [ + 84, + 102, + 524, + 126 + ], + "type": "text", + "content": "Here, feedback means the feedback given to the reviewer comments to improve them, so the feedback will be given to the reviewer." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 139, + 524, + 175 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 139, + 524, + 175 + ], + "spans": [ + { + "bbox": [ + 84, + 139, + 524, + 175 + ], + "type": "text", + "content": "Here are the guidelines that were followed to generate the feedback to the reviewer comments originally: {ACTOR_SYSTEM_PROMPT} . You should keep in mind to adhere to the above guidelines." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 85, + 187, + 237, + 199 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 187, + 237, + 199 + ], + "spans": [ + { + "bbox": [ + 85, + 187, + 237, + 199 + ], + "type": "text", + "content": "Here are step-by-step instructions:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 96, + 206, + 523, + 250 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 96, + 206, + 523, + 230 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 206, + 523, + 230 + ], + "spans": [ + { + "bbox": [ + 96, + 206, + 523, + 230 + ], + "type": "text", + "content": "1. 
Read the feedback list provided for reviewer comments, the full text of the review itself, and the paper about which the review was written." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 238, + 343, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 238, + 343, + 250 + ], + "spans": [ + { + "bbox": [ + 96, + 238, + 343, + 250 + ], + "type": "text", + "content": "2. Evaluate every piece of feedback in the feedback list:" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 121, + 258, + 523, + 332 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 121, + 258, + 523, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 258, + 523, + 304 + ], + "spans": [ + { + "bbox": [ + 121, + 258, + 523, + 304 + ], + "type": "text", + "content": "- For each feedback item, it is imperative that you evaluate the correctness of the feedback. If there is a quote in the feedback, ensure that the quote appears verbatim in the paper. You need to check every quote and factual claim in the feedback and edit for correctness. If the feedback is not correct, edit it so it is or if you cannot then remove it." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 121, + 309, + 523, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 309, + 523, + 332 + ], + "spans": [ + { + "bbox": [ + 121, + 309, + 523, + 332 + ], + "type": "text", + "content": "- For each feedback item, evaluate if it is clear. You should make sure it would not confuse or frustrate the reviewer who reads it." + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 96, + 341, + 523, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 341, + 523, + 388 + ], + "spans": [ + { + "bbox": [ + 96, + 341, + 523, + 388 + ], + "type": "text", + "content": "3. Remove comment-feedback pairs that are too nitpicky, unnecessary, or superficial. Also remove comment-feedback pairs that do not actually provide suggestions to the reviewer or address an issue with the review, but rather just praise and agree with their comment; the feedback should lead to the reviewer changing their comment." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 108, + 392, + 523, + 415 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 108, + 392, + 523, + 415 + ], + "spans": [ + { + "bbox": [ + 108, + 392, + 523, + 415 + ], + "type": "text", + "content": "Here are some examples of comment-feedback pairs that should be entirely removed from the final feedback list:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 113, + 424, + 523, + 447 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 424, + 523, + 447 + ], + "spans": [ + { + "bbox": [ + 113, + 424, + 523, + 447 + ], + "type": "text", + "content": "(a) Reviewer comment: The novelty remains concerned. It seems that the algorithm combines MLR + a bisimulation loss." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 132, + 450, + 523, + 510 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 450, + 523, + 510 + ], + "spans": [ + { + "bbox": [ + 132, + 450, + 523, + 510 + ], + "type": "text", + "content": "Feedback to the reviewer: It would be helpful if you could elaborate on why you see the novelty as limited. 
In particular, it would be very helpful if you could discuss the key differences between simply combining MLR and bisimulation loss versus the approach proposed in this paper. This would give the authors a better chance to clarify the novelty of their work." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 113, + 514, + 523, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 113, + 514, + 523, + 573 + ], + "spans": [ + { + "bbox": [ + 113, + 514, + 523, + 573 + ], + "type": "text", + "content": "(b) Reviewer comment: The paper lacks a proper related work section, which makes it challenging for readers to quickly grasp the background and understand the previous works. It is crucial to include a comprehensive discussion on related works, especially regarding the variance-reduced ZO hard-thresholding algorithm and the variance reduction aspect." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 132, + 575, + 523, + 623 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 575, + 523, + 623 + ], + "spans": [ + { + "bbox": [ + 132, + 575, + 523, + 623 + ], + "type": "text", + "content": "Feedback to the reviewer: To make this comment more actionable, consider suggesting specific areas of related work that should be covered, such as key papers on ZO optimization, hard-thresholding methods, and variance reduction techniques in optimization. This will give the authors clearer guidance on what to include." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 114, + 627, + 523, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 627, + 523, + 710 + ], + "spans": [ + { + "bbox": [ + 114, + 627, + 523, + 710 + ], + "type": "text", + "content": "(c) Reviewer comment: The paper is not very well-presented and is hard to follow. First of all, it is unclear in the hybrid setting considered, what are the relative relations of the guest parties? In the introduction, it appears that they share the same feature space but have different sample IDs, however, in 3.1 they appear to have different dimensions and unclear alignment. It is suggested that the paper properly define the problem setting. A figure on how data is partitioned by different parties would also help." + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "24" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 132, + 79, + 524, + 115 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 79, + 524, + 115 + ], + "spans": [ + { + "bbox": [ + 132, + 79, + 524, + 115 + ], + "type": "text", + "content": "Feedback to the reviewer: Your suggestion for a clearer definition of the problem setting and a visual representation of data partitioning is excellent. This would significantly improve the paper's clarity and readability." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 114, + 118, + 524, + 167 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 118, + 524, + 167 + ], + "spans": [ + { + "bbox": [ + 114, + 118, + 524, + 167 + ], + "type": "text", + "content": "(d) Reviewer comment: 3) the model performance of the proposed methods still appear to be a little inferior to the centralized setting, not exactly \"comparable\" as claimed. It is important to understand whether the proposed method is \"lossless\" or \"lossy\" and why. I think more detailed examinations and explanations are needed here." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 132, + 169, + 524, + 217 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 169, + 524, + 217 + ], + "spans": [ + { + "bbox": [ + 132, + 169, + 524, + 217 + ], + "type": "text", + "content": "Feedback to the reviewer: Your observation about the performance gap between the proposed method and the centralized setting is insightful. Requesting a more detailed analysis of whether the method is lossless or lossy, along with explanations for any performance differences, would significantly enhance the paper's contribution." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 115, + 220, + 524, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 115, + 220, + 524, + 268 + ], + "spans": [ + { + "bbox": [ + 115, + 220, + 524, + 268 + ], + "type": "text", + "content": "(e) Reviewer comment: Q2: It appears that the introduced projection loss can be directly optimized with respect to the trigger " + }, + { + "bbox": [ + 115, + 220, + 524, + 268 + ], + "type": "inline_equation", + "content": "T" + }, + { + "bbox": [ + 115, + 220, + 524, + 268 + ], + "type": "text", + "content": ". What's the rationale behind setting an upper bound and optimizing the projection loss through this bound? Does this approach offer computational benefits?" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 132, + 270, + 524, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 132, + 270, + 524, + 318 + ], + "spans": [ + { + "bbox": [ + 132, + 270, + 524, + 318 + ], + "type": "text", + "content": "Feedback to the reviewer: This question effectively probes the authors' methodological choices. It's a clear and concise query that could lead to valuable insights about the paper's approach. The authors' response could provide important context about the trade-offs involved in their method." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 97, + 326, + 274, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 326, + 274, + 338 + ], + "spans": [ + { + "bbox": [ + 97, + 326, + 274, + 338 + ], + "type": "text", + "content": "4. Edit comments based on evaluations:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 121, + 346, + 523, + 387 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 121, + 346, + 523, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 346, + 523, + 370 + ], + "spans": [ + { + "bbox": [ + 121, + 346, + 523, + 370 + ], + "type": "text", + "content": "- Do not add any new points unless the previous feedback obviously missed something important." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 121, + 374, + 475, + 387 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 121, + 374, + 475, + 387 + ], + "spans": [ + { + "bbox": [ + 121, + 374, + 475, + 387 + ], + "type": "text", + "content": "- If you do not identify any issues with a comment-feedback pair, do not edit it." + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 96, + 393, + 524, + 438 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 97, + 393, + 524, + 418 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 393, + 524, + 418 + ], + "spans": [ + { + "bbox": [ + 97, + 393, + 524, + 418 + ], + "type": "text", + "content": "5. The feedback will be shared with the reviewers for them to improve their comments. Address the reviewer in the second person (e.g., \"you\") and do not refer to them as \"the reviewer.\"" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 96, + 426, + 508, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 426, + 508, + 438 + ], + "spans": [ + { + "bbox": [ + 96, + 426, + 508, + 438 + ], + "type": "text", + "content": "6. Return the feedback list in the format you received it in, where the pairs are formatted as:" + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 122, + 445, + 356, + 474 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 122, + 445, + 356, + 458 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 445, + 356, + 458 + ], + "spans": [ + { + "bbox": [ + 122, + 445, + 356, + 458 + ], + "type": "text", + "content": "- Comment: {{the verbatim comment of interest}}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 122, + 461, + 301, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 122, + 461, + 301, + 474 + ], + "spans": [ + { + "bbox": [ + 122, + 461, + 301, + 474 + ], + "type": "text", + "content": "- Feedback: {{your concise feedback}}" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 482, + 145, + 493 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 482, + 145, + 493 + ], + "spans": [ + { + "bbox": [ + 85, + 482, + 145, + 493 + ], + "type": "text", + "content": "Remember:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 99, + 502, + 524, + 624 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 99, + 502, + 450, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 502, + 450, + 514 + ], + "spans": [ + { + "bbox": [ + 99, + 502, + 450, + 514 + ], + "type": "text", + "content": "- You are a critic that will help reviewers improve their comments and reviews." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 99, + 521, + 455, + 533 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 521, + 455, + 533 + ], + "spans": [ + { + "bbox": [ + 99, + 521, + 455, + 533 + ], + "type": "text", + "content": "- Be concise, limiting your feedback for each reviewer comment to 1-2 sentences." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 99, + 541, + 479, + 554 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 541, + 479, + 554 + ], + "spans": [ + { + "bbox": [ + 99, + 541, + 479, + 554 + ], + "type": "text", + "content": "- Do not summarize your feedback at the end or include a preamble at the beginning." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 99, + 562, + 415, + 573 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 562, + 415, + 573 + ], + "spans": [ + { + "bbox": [ + 99, + 562, + 415, + 573 + ], + "type": "text", + "content": "- Do not repeat anything the reviewer already included in their review." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 99, + 582, + 373, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 582, + 373, + 594 + ], + "spans": [ + { + "bbox": [ + 99, + 582, + 373, + 594 + ], + "type": "text", + "content": "- Do not mention that you are using a checklist or guidelines." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 99, + 601, + 524, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 601, + 524, + 624 + ], + "spans": [ + { + "bbox": [ + 99, + 601, + 524, + 624 + ], + "type": "text", + "content": "- Do not address the authors at all or provide suggestions to the authors. You are only giving feedback to the reviewer." + } + ] + } + ], + "index": 21 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 640, + 168, + 652 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 640, + 168, + 652 + ], + "spans": [ + { + "bbox": [ + 85, + 640, + 168, + 652 + ], + "type": "text", + "content": "Formatter Prompt" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 85, + 662, + 313, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 662, + 313, + 675 + ], + "spans": [ + { + "bbox": [ + 85, + 662, + 313, + 675 + ], + "type": "text", + "content": "Here is the feedback for you to format: {feedback}" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "25" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 24 + }, + { + "para_blocks": [ + { + "bbox": [ + 85, + 74, + 203, + 85 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 74, + 203, + 85 + ], + "spans": [ + { + "bbox": [ + 85, + 74, + 203, + 85 + ], + "type": "text", + "content": "Formatter System Prompt" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 95, + 525, + 143 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 95, + 525, + 143 + ], + "spans": [ + { + "bbox": [ + 83, + 95, + 525, + 143 + ], + "type": "text", + "content": "You will be given a set of feedback given to various reviewer comments in a peer review of a machine learning paper. Your response, which will be the list of reviewer comments and feedback to them, will be shared with the reviewers who wrote the review, so that they can improve their reviews and the peer review cycle." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 155, + 525, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 155, + 525, + 179 + ], + "spans": [ + { + "bbox": [ + 83, + 155, + 525, + 179 + ], + "type": "text", + "content": "Your task is to format the feedback into a structured format. 
You should format the feedback as a list of comment-feedback pairs:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 99, + 189, + 375, + 261 + ], + "type": "list", + "angle": 0, + "index": 7, + "blocks": [ + { + "bbox": [ + 99, + 189, + 283, + 201 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 189, + 283, + 201 + ], + "spans": [ + { + "bbox": [ + 99, + 189, + 283, + 201 + ], + "type": "text", + "content": "- Reviewer comment: " + }, + { + "bbox": [ + 99, + 189, + 283, + 201 + ], + "type": "inline_equation", + "content": "\\{\\{\\mathrm{a~comment}\\} \\}" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 99, + 208, + 375, + 221 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 208, + 375, + 221 + ], + "spans": [ + { + "bbox": [ + 99, + 208, + 375, + 221 + ], + "type": "text", + "content": "- Feedback to the reviewer: {{feedback to the comment}}" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 99, + 228, + 312, + 242 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 228, + 312, + 242 + ], + "spans": [ + { + "bbox": [ + 99, + 228, + 312, + 242 + ], + "type": "text", + "content": "- Reviewer comment: {{another comment}}" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 99, + 248, + 375, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 248, + 375, + 261 + ], + "spans": [ + { + "bbox": [ + 99, + 248, + 375, + 261 + ], + "type": "text", + "content": "- Feedback to the reviewer: {{feedback to the comment}}" + } + ] + } + ], + "index": 6 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 110, + 268, + 121, + 275 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 110, + 268, + 121, + 275 + ], + "spans": [ + { + "bbox": [ + 110, + 268, + 121, + 275 + ], + "type": "text", + "content": "中" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 286, + 525, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 286, + 525, + 310 + ], + "spans": [ + { + "bbox": [ + 83, + 286, + 525, + 310 + ], + "type": "text", + "content": "Your goal is to only keep feedback to the reviewers that can help them improve their comments. You should only pay attention to lines that start with \"Comment\" or \"Feedback\"." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 99, + 319, + 525, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 319, + 525, + 344 + ], + "spans": [ + { + "bbox": [ + 99, + 319, + 525, + 344 + ], + "type": "text", + "content": "- Only keep the comment-feedback pairs where the feedback can help improve the reviewer. If there is no suggestion for improvement, remove the entire comment-feedback pair." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 119, + 352, + 524, + 375 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 352, + 524, + 375 + ], + "spans": [ + { + "bbox": [ + 119, + 352, + 524, + 375 + ], + "type": "text", + "content": "- Here is an example of a comment-feedback pair that should be removed from the final feedback list:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 140, + 380, + 524, + 428 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 380, + 524, + 428 + ], + "spans": [ + { + "bbox": [ + 140, + 380, + 524, + 428 + ], + "type": "text", + "content": "* Reviewer comment: Section 2.2. \"It independently formulates new approaches\" → Is it a hallucination or a feature? It looks like a hallucination to me. 
If this is important for achieving good performance, can you provide an ablation study based on whether to allow new approaches or not?" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 140, + 430, + 524, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 140, + 430, + 524, + 466 + ], + "spans": [ + { + "bbox": [ + 140, + 430, + 524, + 466 + ], + "type": "text", + "content": "* Feedback to the reviewer: This is a thoughtful question about an important aspect of the methodology. Your suggestion for an ablation study is particularly valuable and could provide insights into the method's effectiveness." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 119, + 469, + 524, + 494 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 119, + 469, + 524, + 494 + ], + "spans": [ + { + "bbox": [ + 119, + 469, + 524, + 494 + ], + "type": "text", + "content": "- If the feedback says \"No changes needed\" or something with a similar meaning, remove the entire comment-feedback pair." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 99, + 501, + 524, + 601 + ], + "type": "list", + "angle": 0, + "index": 18, + "blocks": [ + { + "bbox": [ + 99, + 501, + 524, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 501, + 524, + 525 + ], + "spans": [ + { + "bbox": [ + 99, + 501, + 524, + 525 + ], + "type": "text", + "content": "- Do not modify the content of the feedback at all, only format it into the bullet point format described above." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 99, + 533, + 524, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 533, + 524, + 570 + ], + "spans": [ + { + "bbox": [ + 99, + 533, + 524, + 570 + ], + "type": "text", + "content": "- The response you send will be immediately shared with the reviewers. Thus, there should be NO OTHER TEXT in the output, for example no preamble or conclusion sentences. Only respond with the list of feedback & reviewer comment bullets, and no other text." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 99, + 577, + 524, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 577, + 524, + 601 + ], + "spans": [ + { + "bbox": [ + 99, + 577, + 524, + 601 + ], + "type": "text", + "content": "- Since your response will immediately be sent to the reviewers, if there is no feedback, just say \"Thanks for your hard work!\"." 
+ } + ] + } + ], + "index": 17 + } + ], + "sub_type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "26" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 25 + }, + { + "para_blocks": [ + { + "bbox": [ + 84, + 72, + 367, + 85 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 72, + 367, + 85 + ], + "spans": [ + { + "bbox": [ + 84, + 72, + 367, + 85 + ], + "type": "text", + "content": "We also provide the prompt used for the incorporation analysis:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 85, + 92, + 224, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 92, + 224, + 105 + ], + "spans": [ + { + "bbox": [ + 85, + 92, + 224, + 105 + ], + "type": "text", + "content": "Incorporation Analysis Prompt" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 84, + 114, + 526, + 139 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 114, + 526, + 139 + ], + "spans": [ + { + "bbox": [ + 84, + 114, + 526, + 139 + ], + "type": "text", + "content": "Task: Determine if the following feedback suggestion was incorporated into the modified version of a review. Also, categorize the given feedback into exactly one of these three categories:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 96, + 146, + 524, + 305 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 97, + 146, + 524, + 194 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 146, + 524, + 194 + ], + "spans": [ + { + "bbox": [ + 97, + 146, + 524, + 194 + ], + "type": "text", + "content": "1. ACTIONABLE_VAGUE: Encouraging reviewers to rephrase vague review comments, making them more actionable for the authors. For example, the feedback says: \"It would be helpful to suggest specific baselines that you think must be included. Are there particular methods you feel are missing from the current comparison? Could you elaborate why?\"" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 96, + 201, + 524, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 201, + 524, + 250 + ], + "spans": [ + { + "bbox": [ + 96, + 201, + 524, + 250 + ], + "type": "text", + "content": "2. CONTENTClarIFY: Highlighting sections of the paper that may already address some of the reviewer's questions (clarifying content). For example, the feedback says: \"Does Figure 5 of the paper answer your question? In particular: 'In Transformers, the proposed technique provides " + }, + { + "bbox": [ + 96, + 201, + 524, + 250 + ], + "type": "inline_equation", + "content": "25\\%" + }, + { + "bbox": [ + 96, + 201, + 524, + 250 + ], + "type": "text", + "content": " relative improvement in wall-clock time (Figure 5)'.\"" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 96, + 258, + 524, + 305 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 96, + 258, + 524, + 305 + ], + "spans": [ + { + "bbox": [ + 96, + 258, + 524, + 305 + ], + "type": "text", + "content": "3. ADDRESS_UNPROFESSIONAL: Identifying and addressing unprofessional or inappropriate remarks in the review. 
For example, the feedback says: \"We appreciate your review, but kindly request that you focus your comments on the specific content and methodology of the paper rather than making personal remarks about the authors.\"" + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 314, + 151, + 325 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 314, + 151, + 325 + ], + "spans": [ + { + "bbox": [ + 85, + 314, + 151, + 325 + ], + "type": "text", + "content": "Instructions:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 97, + 334, + 524, + 473 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 97, + 334, + 313, + 346 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 334, + 313, + 346 + ], + "spans": [ + { + "bbox": [ + 97, + 334, + 313, + 346 + ], + "type": "text", + "content": "1. Read the original review and modified review." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 97, + 354, + 451, + 365 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 354, + 451, + 365 + ], + "spans": [ + { + "bbox": [ + 97, + 354, + 451, + 365 + ], + "type": "text", + "content": "2. Read the reviewer's original comment and the feedback given to the reviewer." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 97, + 373, + 524, + 432 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 373, + 524, + 432 + ], + "spans": [ + { + "bbox": [ + 97, + 373, + 524, + 432 + ], + "type": "text", + "content": "3. Determine if the changes suggested in the feedback were incorporated into the modified review as compared to the original review. If the reviewer's original comment appears verbatim in the modified review still, you should return FALSE for the incorporation. The incorporations should be clear and quite explicit. Think critically about if the incorporation is significant enough to count." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 97, + 441, + 513, + 453 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 441, + 513, + 453 + ], + "spans": [ + { + "bbox": [ + 97, + 441, + 513, + 453 + ], + "type": "text", + "content": "4. Determine which of the three categories best describes the primary purpose of the feedback." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 97, + 461, + 318, + 473 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 97, + 461, + 318, + 473 + ], + "spans": [ + { + "bbox": [ + 97, + 461, + 318, + 473 + ], + "type": "text", + "content": "5. Think step by step and explain your reasoning." + } + ] + } + ], + "index": 12 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 84, + 481, + 524, + 505 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 481, + 524, + 505 + ], + "spans": [ + { + "bbox": [ + 84, + 481, + 524, + 505 + ], + "type": "text", + "content": "Output Format: Please provide your final answer as two comma-separated values between tags, where:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 99, + 513, + 524, + 556 + ], + "type": "list", + "angle": 0, + "index": 17, + "blocks": [ + { + "bbox": [ + 99, + 513, + 501, + 525 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 513, + 501, + 525 + ], + "spans": [ + { + "bbox": [ + 99, + 513, + 501, + 525 + ], + "type": "text", + "content": "- The first boolean is TRUE or FALSE depending on whether the feedback was incorporated." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 99, + 533, + 524, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 99, + 533, + 524, + 556 + ], + "spans": [ + { + "bbox": [ + 99, + 533, + 524, + 556 + ], + "type": "text", + "content": "- The second string is one of these three options: ACTIONABLE_VAGUE, CONTENT_CLRIFY, or ADDRESS_UNPROFESSIONAL." + } + ] + } + ], + "index": 16 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 85, + 564, + 342, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 564, + 342, + 577 + ], + "spans": [ + { + "bbox": [ + 85, + 564, + 342, + 577 + ], + "type": "text", + "content": "Example: TRUE, ACTIONABLE_VAGUE" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 69, + 602, + 212, + 618 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 602, + 212, + 618 + ], + "spans": [ + { + "bbox": [ + 69, + 602, + 212, + 618 + ], + "type": "text", + "content": "B Reliability tests" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 68, + 627, + 541, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 627, + 541, + 651 + ], + "spans": [ + { + "bbox": [ + 68, + 627, + 541, + 651 + ], + "type": "text", + "content": "We generated the following reliability tests to be run in real-time after feedback was generated. For each reliability test, we provide examples of feedback that would fail it:" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 81, + 656, + 538, + 679 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 656, + 538, + 679 + ], + "spans": [ + { + "bbox": [ + 81, + 656, + 538, + 679 + ], + "type": "text", + "content": "1. Praising the reviewer: make sure the feedback does not simply praise what the reviewer wrote without providing critical suggestions to improve their comment." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 93, + 680, + 500, + 693 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 680, + 500, + 693 + ], + "spans": [ + { + "bbox": [ + 93, + 680, + 500, + 693 + ], + "type": "text", + "content": "Example feedback: \"This is a good question that challenges a key assumption of the paper.\"" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 80, + 698, + 541, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 698, + 541, + 723 + ], + "spans": [ + { + "bbox": [ + 80, + 698, + 541, + 723 + ], + "type": "text", + "content": "2. Addressing feedback to the author: certify that the feedback is addressed to the reviewer with suggestions to make their review better, rather than addressed to the author of the paper with suggestions" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "27" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 26 + }, + { + "para_blocks": [ + { + "bbox": [ + 93, + 72, + 261, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 72, + 261, + 83 + ], + "spans": [ + { + "bbox": [ + 93, + 72, + 261, + 83 + ], + "type": "text", + "content": "on how they can improve their paper." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 93, + 84, + 541, + 133 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 93, + 84, + 541, + 133 + ], + "spans": [ + { + "bbox": [ + 93, + 84, + 541, + 133 + ], + "type": "text", + "content": "Example feedback: \"To strengthen your paper, consider discussing the relationship between FrugalGPT and traditional ensembling techniques. Highlight both similarities and differences and explain how this relates to the observed quality improvements. This would provide more context for your results and situate your work within the broader field of machine learning.\"" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 80, + 140, + 541, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 140, + 541, + 163 + ], + "spans": [ + { + "bbox": [ + 80, + 140, + 541, + 163 + ], + "type": "text", + "content": "3. Restate what the reviewer wrote: does the feedback simply restate what the review comment says without providing any new meaningful and unique suggestions?" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 92, + 164, + 539, + 187 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 164, + 539, + 187 + ], + "spans": [ + { + "bbox": [ + 92, + 164, + 539, + 187 + ], + "type": "text", + "content": "Example reviewer comment: Can examples or further clarification be given for the 3.1 sentence \"enhancing the accountability of the output\"? This isn't clear, at least to me." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 92, + 188, + 541, + 224 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 92, + 188, + 541, + 224 + ], + "spans": [ + { + "bbox": [ + 92, + 188, + 541, + 224 + ], + "type": "text", + "content": "Example feedback: This is a good point that could lead to improved clarity in the paper. To make your comment more actionable, you could ask the authors to provide examples or further clarification for the sentence \"enhancing the accountability of the output\"." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 80, + 232, + 541, + 255 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 232, + 541, + 255 + ], + "spans": [ + { + "bbox": [ + 80, + 232, + 541, + 255 + ], + "type": "text", + "content": "4. Format is correct: ensure that all feedback pairs are in the correct format, protecting against any errors in the pipeline that could have led to malformed feedback." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 274, + 511, + 289 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 274, + 511, + 289 + ], + "spans": [ + { + "bbox": [ + 68, + 274, + 511, + 289 + ], + "type": "text", + "content": "C Average score changes during review and rebuttal periods" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 298, + 541, + 392 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 298, + 541, + 392 + ], + "spans": [ + { + "bbox": [ + 67, + 298, + 541, + 392 + ], + "type": "text", + "content": "In Figure S1A, we examined the potential change in review scores (soundness, presentation, contribution, rating, and confidence) between the initial and modified reviews across the groups during the review period. We found that reviewers who were selected to receive feedback did not change their scores more than those in the control group (top panel). 
We also saw that of reviewers who received feedback, reviewers who updated their review were significantly more likely to decrease their soundness score and increase their confidence score at the end of the review period (before the rebuttal period began) compared to those who did not update their review. This suggests that reviewers who updated their reviews became more confident in their assessments." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 394, + 541, + 455 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 394, + 541, + 455 + ], + "spans": [ + { + "bbox": [ + 67, + 394, + 541, + 455 + ], + "type": "text", + "content": "In Figure S1B, we conducted the same analysis during the rebuttal period. Similar to the review period, we found that reviewers who were selected to receive feedback did not change their scores more than those in the control group (top panel). Of reviewers who received feedback, those who updated their reviews significantly increased all scores except confidence compared to those who did not update their reviews. From this, we see that reviewers who updated their reviews were much more engaged in the rebuttal process." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 68, + 472, + 320, + 488 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 472, + 320, + 488 + ], + "spans": [ + { + "bbox": [ + 68, + 472, + 320, + 488 + ], + "type": "text", + "content": "D Incorporation model validation" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 496, + 541, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 496, + 541, + 629 + ], + "spans": [ + { + "bbox": [ + 67, + 496, + 541, + 629 + ], + "type": "text", + "content": "To test our incorporation model, we hand-labeled a test set of 222 feedback items (from 63 randomly chosen reviews that had been updated) as being incorporated into the updated review or not. We labeled 132 of those items as incorporated (59.5%) and 90 as not (40.5%). We then ran those 222 feedback items through the LLM pipeline and received a 92% accuracy rate, with a false negative rate of 0.9% and a false positive rate of 5.9% (see Supplementary Figure S2). Of the false positives, 8/13 were instances of human error where the labeler missed that the item was incorporated into the review, and the model accurately identified this incorporation. The remaining 5 false positives were due to subjectivity - the model reasoned that the reviewer partially incorporated the sentiments of the feedback, whereas the labeler did not view that as sufficient enough to count as incorporated. The two false negatives represent data points the labeler initially mislabeled and the model correctly labeled. This effectively gives us a false negative rate of 0% and a false positive rate of 2.25%, allowing us to be confident that our incorporation pipeline was highly accurate." 
+ } + ] + } + ], + "index": 10 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "28" + } + ] + } + ], + "index": 11 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 27 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 78, + 177, + 297, + 486 + ], + "blocks": [ + { + "bbox": [ + 71, + 167, + 81, + 176 + ], + "lines": [ + { + "bbox": [ + 71, + 167, + 81, + 176 + ], + "spans": [ + { + "bbox": [ + 71, + 167, + 81, + 176 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 78, + 177, + 297, + 486 + ], + "lines": [ + { + "bbox": [ + 78, + 177, + 297, + 486 + ], + "spans": [ + { + "bbox": [ + 78, + 177, + 297, + 486 + ], + "type": "image", + "image_path": "9dee71417f024a75311dcca739b498277ce5db9e87bd392a85b26aa9f7e2dc82.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "lines": [ + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "spans": [ + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "text", + "content": "Supplementary Figure S1: (A) Review period score changes. (Top) There is no significant difference in updating scores (measured between initial and pre-rebuttal reviews) between the feedback and control groups. (Bottom) Among reviewers who received feedback, those who updated their reviews were more likely to decrease soundness scores " + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "inline_equation", + "content": "(\\mathrm{p} \\leq 0.05)" + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "text", + "content": " and increase confidence scores " + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "inline_equation", + "content": "(\\mathrm{p} \\leq 0.05)" + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "text", + "content": " compared to those who did not update their reviews. (B) Rebuttal period score changes. (Top) There is no significant difference in updating scores (measured between pre- and post-rebuttal reviews) between the feedback and control groups. (Bottom) Among feedback recipients, reviewers who updated their reviews demonstrated significantly larger score increases across all metrics (soundness: " + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "inline_equation", + "content": "^{**}\\mathrm{p} \\leq 0.01" + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "text", + "content": "; presentation: " + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "inline_equation", + "content": "^{***}\\mathrm{p} \\leq 0.001" + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "text", + "content": "; contribution: " + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "inline_equation", + "content": "^{\\ast}\\mathrm{p} \\leq 0.05" + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "text", + "content": "; rating: " + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "inline_equation", + "content": "^{***}\\mathrm{p} \\leq 0.001" + }, + { + "bbox": [ + 67, + 517, + 541, + 625 + ], + "type": "text", + "content": ") except confidence, compared to non-updaters." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_caption" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 320, + 177, + 536, + 486 + ], + "blocks": [ + { + "bbox": [ + 312, + 168, + 320, + 176 + ], + "lines": [ + { + "bbox": [ + 312, + 168, + 320, + 176 + ], + "spans": [ + { + "bbox": [ + 312, + 168, + 320, + 176 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 320, + 177, + 536, + 486 + ], + "lines": [ + { + "bbox": [ + 320, + 177, + 536, + 486 + ], + "spans": [ + { + "bbox": [ + 320, + 177, + 536, + 486 + ], + "type": "image", + "image_path": "5d0e819082fc1eb3e1244c31df2483dcf42bb1a4e7d886a57d2f157e222f01be.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "29" + } + ] + } + ], + "index": 5 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 28 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 126, + 222, + 141, + 240 + ], + "blocks": [ + { + "bbox": [ + 73, + 219, + 82, + 227 + ], + "lines": [ + { + "bbox": [ + 73, + 219, + 82, + 227 + ], + "spans": [ + { + "bbox": [ + 73, + 219, + 82, + 227 + ], + "type": "text", + "content": "A" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 126, + 222, + 141, + 240 + ], + "lines": [ + { + "bbox": [ + 126, + 222, + 141, + 240 + ], + "spans": [ + { + "bbox": [ + 126, + 222, + 141, + 240 + ], + "type": "image", + "image_path": "c919e5fe32eb32c2e054bbd43c926c9021b22fba46f76a309bc76e44d87bdac0.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 143, + 224, + 216, + 238 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 143, + 224, + 216, + 238 + ], + "spans": [ + { + "bbox": [ + 143, + 224, + 216, + 238 + ], + "type": "text", + "content": "Original review" + } + ] + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 126, + 256, + 141, + 274 + ], + "blocks": [ + { + "bbox": [ + 126, + 256, + 141, + 274 + ], + "lines": [ + { + "bbox": [ + 126, + 256, + 141, + 274 + ], + "spans": [ + { + "bbox": [ + 126, + 256, + 141, + 274 + ], + "type": "image", + "image_path": "1e9dd95be4cbe45e120ff063aafa70f494240cc5380b6e024acba14c412c4f76.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 73, + 328, + 82, + 337 + ], + "lines": [ + { + "bbox": [ + 73, + 328, + 82, + 337 + ], + "spans": [ + { + "bbox": [ + 73, + 328, + 82, + 337 + ], + "type": "text", + "content": "B" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 3 + }, + { + "bbox": [ + 141, + 258, + 220, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 141, + 258, + 220, + 270 + ], + "spans": [ + { + "bbox": [ + 141, + 258, + 220, + 270 + ], + "type": "text", + "content": "Modified review" + } + ] + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 126, + 289, + 141, + 308 + ], + "blocks": [ + { + "bbox": [ + 126, + 289, + 141, + 308 + ], + "lines": [ + { + "bbox": [ + 126, + 289, + 141, + 308 + ], + "spans": [ + { + "bbox": [ + 126, + 289, + 141, + 308 + ], + "type": "image", + "image_path": 
"b6314c2a0c17b97ed691d22b6db7c48ab77ca5a01e5acc6e666823c22a6a99e5.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "bbox": [ + 142, + 292, + 216, + 304 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 142, + 292, + 216, + 304 + ], + "spans": [ + { + "bbox": [ + 142, + 292, + 216, + 304 + ], + "type": "text", + "content": "Feedback item" + } + ] + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 258, + 251, + 361, + 279 + ], + "blocks": [ + { + "bbox": [ + 258, + 251, + 361, + 279 + ], + "lines": [ + { + "bbox": [ + 258, + 251, + 361, + 279 + ], + "spans": [ + { + "bbox": [ + 258, + 251, + 361, + 279 + ], + "type": "image", + "image_path": "d1d3ea6c5c822412e605337fe0c301513ef3ce178abed41a323ab4f830aec80e.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "bbox": [ + 370, + 251, + 487, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 370, + 251, + 487, + 279 + ], + "spans": [ + { + "bbox": [ + 370, + 251, + 487, + 279 + ], + "type": "text", + "content": "Feedback incorporated (Y/N)" + } + ] + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 211, + 327, + 394, + 476 + ], + "blocks": [ + { + "bbox": [ + 211, + 327, + 394, + 476 + ], + "lines": [ + { + "bbox": [ + 211, + 327, + 394, + 476 + ], + "spans": [ + { + "bbox": [ + 211, + 327, + 394, + 476 + ], + "type": "image", + "image_path": "db0d37deb2bcaec229f5f0d8eebc6e9fa349e0043920244e80891c021de2bd95.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 68, + 504, + 541, + 576 + ], + "lines": [ + { + "bbox": [ + 68, + 504, + 541, + 576 + ], + "spans": [ + { + "bbox": [ + 68, + 504, + 541, + 576 + ], + "type": "text", + "content": "Supplementary Figure S2: (A) Incorporation model pipeline. Given the original review text, modified review text, and individual feedback item, the LLM determined if the feedback was incorporated into the modified review or not. (B) Model accuracy. Our incorporation model successfully labeled " + }, + { + "bbox": [ + 68, + 504, + 541, + 576 + ], + "type": "inline_equation", + "content": "92\\%" + }, + { + "bbox": [ + 68, + 504, + 541, + 576 + ], + "type": "text", + "content": " of the test feedback items, where human annotators determined the ground truth labeling. Of the false positives, the majority were instances of human error where the model accurately identified the missed incorporation. All of the false negatives were instances of human error that the model caught." 
+ } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "spans": [ + { + "bbox": [ + 299, + 741, + 311, + 750 + ], + "type": "text", + "content": "30" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 29 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_content_list.json b/data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_content_list.json new file mode 100644 index 0000000000000000000000000000000000000000..dbf4aaf0838122f6e559c0948a9adbe331572fb5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_content_list.json @@ -0,0 +1,2639 @@ +[ + { + "type": "text", + "text": "Ryota Tanaka $^{1,2}$ Taichi Iki $^{1}$ Taku Hasegawa $^{1}$ Kyosuke Nishida $^{1}$ Kuniko Saito $^{1}$ Jun Suzuki $^{2}$ $^{1}$ NTT Human Informatics Laboratories, NTT Corporation \nhttps://vdocrag.github.io", + "bbox": [ + 99, + 183, + 911, + 239 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Abstract", + "text_level": 1, + "bbox": [ + 246, + 272, + 326, + 287 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "We aim to develop a retrieval-augmented generation (RAG) framework that answers questions over a corpus of visually-rich documents presented in mixed modalities (e.g., charts, tables) and diverse formats (e.g., PDF, PPTX). In this paper, we introduce a new RAG framework, VDocRAG, which can directly understand varied documents and modalities in a unified image format to prevent missing information that occurs by parsing documents to obtain text. To improve the performance, we propose novel self-supervised pre-training tasks that adapt large vision-language models for retrieval by compressing visual information into dense token representations while aligning them with textual content in documents. Furthermore, we introduce OpenDocVQA, the first unified collection of open-domain document visual question answering datasets, encompassing diverse document types and formats. OpenDocVQA provides a comprehensive resource for training and evaluating retrieval and question answering models on visually-rich documents in an open-domain setting. Experiments show that VDocRAG substantially outperforms conventional text-based RAG and has strong generalization capability, highlighting the potential of an effective RAG paradigm for real-world documents.", + "bbox": [ + 88, + 303, + 485, + 636 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "1. Introduction", + "text_level": 1, + "bbox": [ + 89, + 662, + 220, + 679 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Large language models (LLMs) have demonstrated impressive performance on diverse natural language tasks [2, 16, 24, 55]. These models struggle with factual errors despite their increased model and data scale [39, 40]. To remedy this problem, retrieval-augmented generation (RAG) methods [18, 31] can retrieve knowledge from an external corpus, potentially reducing hallucination and increasing knowledge coverage. Most previous RAG frameworks assume the context is composed entirely of text, with no graphical elements. 
In contrast, a significant amount of real-world information is stored in visually-rich documents, such as charts, tables, web pages, and office documents. These documents often contain both textual and visual objects, with content spread structurally across various loca", + "bbox": [ + 88, + 688, + 482, + 900 + ], + "page_idx": 0 + }, + { + "type": "image", + "img_path": "images/dbae51a8466af1104918b799e875239d66dc9bedd9befb480bbfb0a5a8186151.jpg", + "image_caption": [ + "Figure 1. Our framework of VDocRAG and examples from OpenDocVQA. VDocRAG consists of VDocRetirver and VDocGenerator, which can retrieve relevant documents and generate answers by understanding the original appearance of documents." + ], + "image_footnote": [], + "bbox": [ + 524, + 273, + 893, + 512 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "tions depending on diverse formats and types.", + "bbox": [ + 511, + 613, + 816, + 628 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "Thus, document visual question answering (DocumentVQA) [42, 43, 56, 57] aims to build an agent capable of reading and comprehending document images to answer the question. Here, most existing DocumentVQA questions operate in a closed setting without requiring any retrieval. While this definition simplifies the QA model, it does not reflect many real-world use cases where the question is asked through some open-domain natural language interface, such as QA systems searching information across in-house documents or customer service chatbots on e-commerce websites. To address this limitation, recent works have introduced retrieval tasks on document images [17, 37]. However, these cannot fully develop models that effectively integrate the retrieved information into the final output. This gap hinders the application of DocumentVQA models in more realistic, open-domain scenarios.", + "bbox": [ + 509, + 628, + 906, + 869 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "In this paper, we introduce a new RAG framework, VDocRAG, which can directly understand varied docu", + "bbox": [ + 511, + 869, + 903, + 900 + ], + "page_idx": 0 + }, + { + "type": "aside_text", + "text": "arXiv:2504.09795v1 [cs.CL] 14 Apr 2025", + "bbox": [ + 22, + 265, + 60, + 707 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 98, + 123, + 132, + 152 + ], + "page_idx": 0 + }, + { + "type": "header", + "text": "VDocRAG: Retrieval-Augmented Generation over Visually-Rich Documents", + "bbox": [ + 133, + 133, + 898, + 156 + ], + "page_idx": 0 + }, + { + "type": "text", + "text": "ments and modalities in a unified image format to avoid tedious parsing and potential information loss that occurs in conventional text-based RAG. As depicted in Figure 1, VDocRAG consists of two main components, both of which effectively leverage the visual features of documents. First, VDocRetriever retrieves document images related to the question from a corpus of document images. Second, VDocGenerator uses these retrieved images to generate the answer. To encode document images and interact with the encoded information, we adapt pre-trained large vision language models (LVLMs) [1, 29] as the backbone for VDocRAG. Since LVLMs are inherently generative models, it is sub-optimal for embeddings as they prevent the representations from capturing information across the entire input sequence due to the training objective (i.e., next-token prediction). 
To bridge this gap, we introduce new self-supervised pre-training tasks that harness the understanding and generation capabilities of LVLMs to enhance representation learning. Specifically, we compress the entire image representation into a dense token representation, by aligning the text in documents via retrieval and generation tasks.", + "bbox": [ + 89, + 90, + 480, + 407 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Furthermore, we introduce OpenDocVQA, the first unified collection of open-domain DocumentVQA datasets encompassing a wide range of document types and formats. OpenDocVQA provides a comprehensive resource for training and evaluating retrieval and question answering models on visually-rich documents in an open-domain setting. Experiments demonstrate that VDocRAG substantially outperforms conventional text-based RAG and has strong generalization performance.", + "bbox": [ + 89, + 407, + 480, + 542 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Our main contributions are summarized as follows:", + "bbox": [ + 109, + 544, + 450, + 558 + ], + "page_idx": 1 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- We introduce a new RAG framework, VDocRAG, which can directly understand diverse real-world documents purely from visual features.", + "- We are the first to explore pre-training tasks designed for document retrieval-oriented adaptation of LVLMs, by compressing visual document representations.", + "- We introduce OpenDocVQA, the first unified open-domain DocumentVQA dataset with diverse documents." + ], + "bbox": [ + 89, + 559, + 482, + 679 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "2. Related Work", + "text_level": 1, + "bbox": [ + 89, + 693, + 232, + 709 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Retrieval-augmented generation (RAG). RAG in the NLP community aims at retrieving external knowledge to reduce factual errors and enhance performance in various knowledge-intensive tasks [3, 5, 39, 40, 49]. Inspired by the success of RAG in NLP, this technique has also applied applications across different domains, including images [8, 50, 51, 64], codes [45, 70], videos [7, 61], audio [26, 62], and 3D [53, 69]. However, most existing works have focused on retrieving knowledge from only plain-text documents or non-text media. In contrast, we tackle the challenge of extracting knowledge from visually-rich documents organized in complex, multimodal formats.", + "bbox": [ + 89, + 719, + 482, + 900 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Visual document retrieval and visual RAG. With the success of LLMs, there is a growing trend to build large vision language models (LVLMs) that integrate image understanding capabilities by combining image encoders [32, 48, 67] with LLMs [1, 10, 29, 33, 35, 58]. Concurrent works in visual document retrieval [13, 17, 37] and visual RAG [9, 38, 66] leverage LVLMs to directly encode visually-rich documents through images. However, these approaches have trouble understanding diverse real-world documents due to the limitations of their datasets and training strategies. The existing visual document retrieval dataset, ViDoRe [37], contains questions that might not require retrieval and handles a limited number of document types, resulting in a gap between real-world scenarios. In contrast, our dataset covers open document types and provides questions that are verified by humans to require retrieval and to have context-independent conditions for the retrieval. 
From the perspective of training, despite the significant gap between generative pre-training tasks and retrieval tasks in LVLMs, previous works [9, 17, 37, 38, 66] leverage LVLMs without specific training for bridging the gap. To address this, we introduce pre-training tasks that transfer the understanding and generation capabilities of LVLMs to retrievers.", + "bbox": [ + 511, + 90, + 906, + 453 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Document visual question answering (DocumentVQA). DocumentVQA is a high-level document understanding task that involves answering questions on visually-rich documents. These documents include a variety of elements, such as handwritten and digital text [42, 56], complex layouts [28, 68, 71], and graphical elements [41, 43, 57]. However, previous studies have assumed closed settings that do not require retrieval, except for Dureader_vis [46]. Our work differs from Dureader_vis as follows. First, OpenDocVQA covers a wide range of document formats and domains, while Dureader_vis focuses on screenshots of websites, limiting its generalizability. Second, OpenDocVQA reflects more real-world scenarios that require both single- and multi-hop reasoning over documents, while Dureader_vis requires only single-hop reasoning. Lastly, even lexical search methods yield sufficient performance in Dureader_vis due to its reliance on textual content. In contrast, OpenDocVQA requires a visual semantic search where visual and contextual information can be exploited.", + "bbox": [ + 511, + 474, + 906, + 763 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3. OpenDocVQA Task and Dataset", + "text_level": 1, + "bbox": [ + 511, + 777, + 808, + 794 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "3.1. Task Formulation", + "text_level": 1, + "bbox": [ + 511, + 801, + 687, + 816 + ], + "page_idx": 1 + }, + { + "type": "text", + "text": "Given a large collection of $N$ document images $\\mathcal{I} = \\{I_1,\\dots,I_N\\}$ and a question $Q$ , the goal of OpenDocVQA task is to output an answer $A$ by finding the relevant $k$ images $\\hat{\\mathcal{I}}\\in \\mathcal{I}$ , where $k\\ll N$ . We decompose the task into two stages. Visual document retrieval: given $Q$ and $\\mathcal{I}$ ,", + "bbox": [ + 511, + 824, + 906, + 902 + ], + "page_idx": 1 + }, + { + "type": "image", + "img_path": "images/f629faeeb9a32204f44c19802ec06137e3e8e181cebc80a8db81f829d802e5c6.jpg", + "image_caption": [ + "Figure 2. Process of creating multi-hop DocumentVQA questions." + ], + "image_footnote": [], + "bbox": [ + 94, + 89, + 495, + 238 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "the model retrieves the relevant $k$ images $\\hat{\\mathcal{I}}$ from which to derive the answer. DocumentVQA: the model takes $Q$ and the retrieved images $\\hat{\\mathcal{I}}$ as input, to generate $A$ .", + "bbox": [ + 89, + 289, + 482, + 335 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "OpenDocVQA covers multiple open-domain DocumentVQA datasets with diverse document types. To reflect real-world scenarios, we evaluate models with both single-pool and all-pool settings. In the single-pool setting, retrieval is performed from a specific pool of documents provided by each original dataset. The all-pool setting requires retrieving from the entire candidate pool, which includes documents from a wide range of domains.", + "bbox": [ + 89, + 335, + 483, + 455 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.2. 
Dataset Collection", + "text_level": 1, + "bbox": [ + 89, + 465, + 267, + 479 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Filtering of DocumentVQA datasets. We collected and filtered instances of seven existing document VQA datasets [28, 41-43, 56, 57, 68]. Most of their questions are context-dependent conditions, where they cannot be answered without referencing the accompanying document (e.g., What is the title?). Therefore, we filtered out questions lacking sufficient context for retrieval. To address this, we initially applied heuristic rules to automatically select likely context-independent questions, reducing the pool by $20.9\\%$ . Then, we manually reviewed and verified the remaining examples to ensure their context independence.", + "bbox": [ + 89, + 487, + 483, + 652 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Reformulation of TableQA dataset. We used QA pairs from Open-WikiTable [27], an open-domain TableQA dataset that required retrieving tables from Wikipedia to answer the question. Since the original dataset provides tables in only textual format (HTML data), we took the screenshot images of tables from the corresponding Wikipedia pages to reformulate the task as the OpenDocVQA.", + "bbox": [ + 89, + 671, + 483, + 777 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Creation of new multi-hop questions. To enhance the model's ability to interact with multiple document sources (e.g., charts and tables), we semi-automatically created a multi-hop DocumentVQA dataset, MHDocVQA, using the single-hop QA pairs collected in the previous steps. As shown in Figure 2, the creating process involved the following steps: (1) We first used spaCy [19] to identify a bridge", + "bbox": [ + 89, + 794, + 483, + 902 + ], + "page_idx": 2 + }, + { + "type": "table", + "img_path": "images/00e4521b82c8ff62a3115b30dc25ed033f6dacd64ce4acc20b5c55599e537218.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
| ViDoRe [17] | Dureader_vis [46] | OpenDocVQA
Retrieval
QA
Context-Independent
Visual Semantic Search
Multi-Hop
Document Contents | T, L, F, C, D | T, L | T, L, F, C, D
Answer Types | - | Ext | Ext, Abs
#Document Types | 6 | 1 | Open
#QAs | 3,810 | 15,000 | 43,474
#Images (Pages) | 8,310 | 158,000 | 206,267
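To make the two-stage formulation of Sec. 3.1 concrete, the following minimal sketch (Python/NumPy) retrieves the k most similar document images by cosine similarity and passes them to an answer generator; `embed_question`, `embed_image`, and `generate_answer` are hypothetical stand-ins for the retriever and generator introduced later, not functions from the released code.

# Minimal sketch of the two-stage OpenDocVQA pipeline (Sec. 3.1):
# (1) retrieve the k most similar document images, (2) generate an answer.
import numpy as np

def retrieve_top_k(question_emb: np.ndarray, image_embs: np.ndarray, k: int = 3):
    # Cosine similarity between the question and every document image.
    q = question_emb / np.linalg.norm(question_emb)
    d = image_embs / np.linalg.norm(image_embs, axis=1, keepdims=True)
    scores = d @ q
    return np.argsort(-scores)[:k]  # indices of the top-k images

def answer_question(question, images, embed_question, embed_image, generate_answer, k=3):
    image_embs = np.stack([embed_image(img) for img in images])  # pre-computed offline in practice
    top_k = retrieve_top_k(embed_question(question), image_embs, k)
    return generate_answer(question, [images[i] for i in top_k])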
", + "bbox": [ + 517, + 88, + 905, + 238 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1. Comparison of related datasets. Document contents include (T)able, (L)ist, (F)igure, (C)hart, and (D)iagram. Answer types are Extractive (Ext) and Abstractive (Abs).", + "bbox": [ + 511, + 248, + 906, + 290 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "entity (e.g., Denmark) in the answer to a single-hop question and then searched for this entity in other single-hop questions. (2) Next, we used Mixtral-8x22B [24] to combine the two single-hop questions. (3) We filtered the generated multi-hop questions using another LLM (GPT-4o [2]), which answered the questions based on the context of the two initial single-hop questions and their answers. If the predicted answer was the same as the answer to the second single-hop question, the multi-hop question was validated. Finally, we manually reviewed the filtered questions to ensure their quality before including them in our dataset.", + "bbox": [ + 511, + 320, + 906, + 487 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Negative candidates mining. We produced negative image candidates for retrievers to sift through for every question, used only during inference. We first extracted OCR text from images in the COYO-700M dataset [6], a web-scaled image collection. Subsequently, we mined negative images where the OCR text exhibits high lexical overlap with the question but does not contain the correct answer.", + "bbox": [ + 511, + 513, + 905, + 621 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "3.3. Comparison with Related Datasets", + "text_level": 1, + "bbox": [ + 511, + 635, + 816, + 651 + ], + "page_idx": 2 + }, + { + "type": "text", + "text": "Table 1 shows the statistics of OpenDocVQA and other related datasets, including ViDoRe [17] and Dureader_vis [46]. OpenDocVQA has three unique key properties: First, it is the first large-scale collection of open-domain DocumentVQA datasets to address open document types, whereas ViDoRe considers six document types for only the retrieval task and Dureader_vis is limited to webpages. Second, the questions in OpenDocVQA are context-independent and require visual semantic search, whereas ViDoRe's questions are context-dependent, and even lexical search methods yield sufficient performance in Dureader_vis. This indicates our dataset better reflects real-world scenarios. Lastly, unlike ViDoRe and Dureader_vis, OpenDocVQA requires multi-hop reasoning with extractive (e.g., span, list) and abstractive (e.g., arithmetic, counting, no answer) answer types, providing a more challenging setting.", + "bbox": [ + 511, + 657, + 906, + 902 + ], + "page_idx": 2 + }, + { + "type": "image", + "img_path": "images/621d716df05ee2f430077f5046584325bfcf604ee584d3e4693b631080d1dc7e.jpg", + "image_caption": [ + "Figure 3. Overview of our VDocRAG model. VDocRetriever retrieves document images related to the question from a corpus of document images, and VDocGenerator uses these retrieved images to generate the answer." + ], + "image_footnote": [], + "bbox": [ + 112, + 89, + 493, + 262 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/fe65f01872bf00c487f537d7f1b317c96711c617b5febf57e0c36686929945c1.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 498, + 90, + 879, + 263 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4. 
Proposed Model", + "text_level": 1, + "bbox": [ + 89, + 330, + 251, + 348 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.1. Architecture Overview", + "text_level": 1, + "bbox": [ + 89, + 356, + 302, + 372 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "As shown in Figure 3, VDocRAG consists of two components: VDocRetriever and VDocGenerator. Our approach adopts the pre-trained LVLMs to unify the varied formats and modalities in a single form as an image for direct document understanding.", + "bbox": [ + 89, + 380, + 483, + 455 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Dynamic high-resolution image encoding. To encode high-resolution images with various aspect ratios, a dynamic cropping [14, 65] is utilized to split the image into smaller patches while maintaining the integrity of the original aspect ratio. Each patch is a small image with $336 \\times 336$ size, and we treat them as individual inputs for the image encoder. After encoding images, we convert them via a projector (two-layer MLP) into visual document features $\\mathbf{z}_{\\mathrm{d}}$ .", + "bbox": [ + 88, + 479, + 483, + 602 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VDocRetriever. VDocRetriever is an LVLM-based dual-encoder architecture that encodes queries and document images independently. We append an $<\\mathrm{EOS}>$ token to the end of the question and visual document features $\\mathbf{z}_{\\mathrm{d}}$ , and then feed them into the LLM to obtain the question and visual document embeddings $(\\mathbf{h}_{\\mathrm{q}}, \\mathbf{h}_{\\mathrm{d}})$ by taking the last layer $<\\mathrm{EOS}>$ vector. Then, it retrieves $k$ documents $\\hat{\\mathcal{I}}$ with the $k$ highest similarity scores to the question. Formally, the similarity scores between the question and visual document embeddings are computed via maximum inner product search [15], as follows: $\\mathrm{SIM}(\\mathbf{h}_{\\mathrm{q}}, \\mathbf{h}_{\\mathrm{d}}) = \\frac{\\mathbf{h}_{\\mathrm{q}}^{\\top} \\mathbf{h}_{\\mathrm{d}}}{\\|\\mathbf{h}_{\\mathrm{q}}\\| \\|\\mathbf{h}_{\\mathrm{d}}\\|}$ .", + "bbox": [ + 89, + 625, + 483, + 803 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "VDocGenerator. VDocGenerator adapts LVLM to generate answers $A$ given the question $Q$ and the retrieved $k$ documents $\\hat{\\mathcal{I}}$ obtained from VDocRetriever. After encoding the retrieval result, we concatenate the question and the encoded result, then feed this combined input into the LLM.", + "bbox": [ + 89, + 824, + 483, + 900 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "4.2. Self-Supervised Pre-training Tasks", + "text_level": 1, + "bbox": [ + 511, + 330, + 815, + 348 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Figure 4a and 4b show our pre-taining tasks in VDocRetriever. The goal of pre-training is to transfer the powerful understanding and generation abilities of LVLMs to facilitate their usage in visual document retrieval. To this end, we propose two new self-supervised pre-training tasks to compress the entire image representation into the token at the end of the input image. Our pre-training process passes the document image, and its extracted OCR text is used as a pseudo target. Full pre-training objectives is defined as $\\mathcal{L} = \\mathcal{L}_{\\mathrm{RCR}} + \\mathcal{L}_{\\mathrm{RCG}}$ .", + "bbox": [ + 511, + 353, + 906, + 505 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Representation Compression via Retrieval (RCR). 
We compress image representations with a contrastive learning task that retrieves images relevant to their corresponding OCR text, by leveraging LVLM's image understanding capabilities. As shown in Figure 4a, we first construct positive OCR text-image pairs $(\\mathbf{h}_0,\\mathbf{h}_{\\mathrm{d}^+})$ from raw unlabeled document images. Then, we adopt in-batch negatives to calculate the contrastive loss by InfoNCE [44] as follows:", + "bbox": [ + 511, + 523, + 905, + 645 + ], + "page_idx": 3 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {R C R}} = - \\log \\frac {\\exp \\left(\\operatorname {S I M} \\left(\\mathbf {h} _ {\\mathrm {o}}, \\mathbf {h} _ {\\mathrm {d} ^ {+}}\\right) / \\tau\\right)}{\\sum_ {i \\in \\mathcal {B}} \\exp \\left(\\operatorname {S I M} \\left(\\mathbf {h} _ {\\mathrm {o}}, \\mathbf {h} _ {\\mathrm {d} _ {i}}\\right) / \\tau\\right)}, \\tag {1}\n$$\n", + "text_format": "latex", + "bbox": [ + 566, + 655, + 903, + 690 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "where $\\tau$ is a temperature hyperparameter to scale the logits, and $\\mathcal{B}$ represents the batch size.", + "bbox": [ + 511, + 700, + 903, + 729 + ], + "page_idx": 3 + }, + { + "type": "text", + "text": "Representation Compression via Generation (RCG). We propose a representation training strategy that leverages the generative capabilities of LVLMs through a customized attention mask matrix. As depicted in Figure 4b, representations for the image tokens, including the token, are obtained via a standard auto-regressive process. In contrast, for the subsequent L OCR token representations, we mask the image token representations and allow only the attention of token and the preceding OCR tokens. This approach facilitates pooling the image representations", + "bbox": [ + 511, + 750, + 906, + 901 + ], + "page_idx": 3 + }, + { + "type": "image", + "img_path": "images/dafe7f71f690b5d09e728783c8143871a4e73e05a3f58810e902020a4d0ef7af.jpg", + "image_caption": [ + "Trainable" + ], + "image_footnote": [], + "bbox": [ + 114, + 99, + 135, + 116 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/5f6a79669dc7ecbbfe97764c217ad7216d08cf0faab4ea84eb343c07e9058ad6.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 186, + 104, + 233, + 113 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9f5bf16af03ad103a698c0321a9b58c638532434e40ee551f0fd81b372907c97.jpg", + "image_caption": [ + "(a) Representation Compression via Retrieval (RCR)" + ], + "image_footnote": [], + "bbox": [ + 107, + 130, + 356, + 273 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/9ba945bae9646b0a77bd959c1908abe2702a7b534271c0c4831d92704d0e3d4d.jpg", + "image_caption": [ + "Self-Supervised Pre-training", + "(b) Representation Compression via Generation (RCG)" + ], + "image_footnote": [], + "bbox": [ + 372, + 92, + 648, + 275 + ], + "page_idx": 4 + }, + { + "type": "image", + "img_path": "images/fb412815b242fc2172d8d550dc1d382145390815e4d89360f0f95a5919c3c6c0.jpg", + "image_caption": [ + "Supervised Fine-tuning", + "(c) Visual Document Retrieval", + "Figure 4. Our pre-training tasks using unlabeled documents and fine-tuning in VDocRetriever. The RCR task retrieves relevant images given corresponding OCR tokens, and the RCG task outputs OCR tokens by paying attention to only the token." 
+ ], + "image_footnote": [], + "bbox": [ + 655, + 130, + 898, + 276 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/1a194f91f9eb8affa85635e923392a2b9e31bb226c9ea35d790ff450f915ec84.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset | Documents | %Filtered | #Images | #Train&Dev | #Test
DocVQA [42] | Industry | 84.8 | 12,767 | 6,382 | -
InfoVQA [43] | Infographic | 61.2 | 5,485 | 9,592 | 1,048
VisualMRC [56] | Webpage | 71.9 | 10,229 | 6,126 | -
ChartQA [41] | Chart | 94.0 | 20,882 | - | 150
OpenWikiTable [27] | Table | 0.0 | 1,257 | 4,261 | -
DUDE [28] | Open | 92.3 | 27,955 | 2,135 | 496
MPMQA [68] | Manual | 81.7 | 10,018 | 3,054 | -
SlideVQA [57]§ | Slide | 66.7 | 52,380 | - | 760
MHDocVQA§ | Open | 9.5 | 28,550 | 9,470 | -
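For concreteness, the combined pre-training objective L = L_RCR + L_RCG of Sec. 4.2 can be sketched as follows. This is an illustrative PyTorch rendering of Eqs. (1) and (2), not the released training code; it assumes the RCG attention masking (image tokens hidden, only the <EOS> token visible to OCR tokens) is applied inside the model.

# Hedged sketch of the pre-training losses described in Sec. 4.2.
import torch
import torch.nn.functional as F

def rcr_loss(h_ocr, h_doc, tau=0.01):
    # InfoNCE with in-batch negatives over (OCR text, document image) pairs, Eq. (1).
    h_ocr = F.normalize(h_ocr, dim=-1)   # [B, D]
    h_doc = F.normalize(h_doc, dim=-1)   # [B, D]
    logits = h_ocr @ h_doc.t() / tau     # [B, B]; diagonal entries are positives
    targets = torch.arange(h_ocr.size(0), device=h_ocr.device)
    return F.cross_entropy(logits, targets)

def rcg_loss(ocr_logits, ocr_labels):
    # Next-token prediction of the L OCR tokens, Eq. (2); the custom attention
    # mask is assumed to be handled by the model itself.
    return F.cross_entropy(ocr_logits.flatten(0, 1), ocr_labels.flatten())

def pretrain_loss(h_ocr, h_doc, ocr_logits, ocr_labels):
    return rcr_loss(h_ocr, h_doc) + rcg_loss(ocr_logits, ocr_labels)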
", + "bbox": [ + 94, + 359, + 480, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Table 2. Datasets in OpenDocVQA. $\\S$ denotes datasets requiring multi-hop reasoning. Note that MHDocVQA was created using only the training datasets.", + "bbox": [ + 89, + 510, + 483, + 554 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "into $<\\mathsf{EOS}>$ token. The loss function is defined as:", + "bbox": [ + 89, + 575, + 426, + 592 + ], + "page_idx": 4 + }, + { + "type": "equation", + "text": "\n$$\n\\mathcal {L} _ {\\mathrm {R C G}} = - \\frac {1}{L} \\sum_ {i = 1} ^ {L} \\log p \\left(y _ {i} \\mid y _ {< i}, < \\mathrm {E O S} >\\right), \\tag {2}\n$$\n", + "text_format": "latex", + "bbox": [ + 153, + 597, + 482, + 638 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "where $y_{i}$ denotes the $i$ -th token of the OCR.", + "bbox": [ + 89, + 645, + 382, + 660 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "4.3. Supervised Fine-tuning", + "text_level": 1, + "bbox": [ + 89, + 667, + 307, + 684 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "We first fine-tune the VDocRetriever with the contrastive learning objective using query-document pairs with in-batch negatives (see Figure 4c). Then, we apply the trained VDocRetriever to search over the corpus $\\mathcal{I}$ to feed the top-k documents into the VDocGenerator. Finally, we train the VDocGenerator using the next-token prediction objective.", + "bbox": [ + 89, + 690, + 483, + 782 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5. Experiments", + "text_level": 1, + "bbox": [ + 89, + 792, + 223, + 810 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "5.1. Experimental Setup", + "text_level": 1, + "bbox": [ + 89, + 816, + 279, + 834 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Pre-training dataset. For pre-training, we gathered 500k samples containing document image and OCR text pairs filtered from the DocStruct4M [20]. We excluded any images that appeared in the test set to avoid data contamination.", + "bbox": [ + 89, + 839, + 483, + 900 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Fine-tuning and evaluation datasets. We evaluated our models in both zero-shot and supervised settings. The zero-shot evaluation assessed the models' generalization capabilities on unseen datasets, while the supervised evaluation measured performance when training samples were available. As shown in Table 2, we trained our models on seven datasets and evaluated them on four datasets, including ChartQA and SlideVQA in the zero-shot setting, and InfoVQA and DUDE in the supervised setting.", + "bbox": [ + 511, + 363, + 906, + 500 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Implementation details. We initialized VDocRAG with Phi3V [1], a state-of-the-art LVLM trained on high-resolution images and multi-image data. The parameters of VDocRetriever and VDocGenerator were not shared. We employed LoRA [21] with LLM while keeping other parameters frozen during training. We trained VDocRAG for one epoch on eight A100-80G GPUs with AdamW [36] optimizer and FlashAttention [11], using batch sizes of 16 for pre-training and 64 for fine-tuning. We set the temperature $\\tau$ to 0.01. We applied Tesseract [54] to extract OCR text in images. By default, we used the top three documents obtained from VDocRetirver.", + "bbox": [ + 511, + 518, + 908, + 700 + ], + "page_idx": 4 + }, + { + "type": "text", + "text": "Retrieval baselines. 
We compared VDocRetriever with two categories of retrievers. The first category includes off-the-shelf text retrieval models on extracted text and image retrieval models. These consist of BM25 [52], a lexical matching model; Contriver [22], E5 [59], and GTE [34], which are popular strong text embedding models based on BERT [12]; E5-Mistral [60] and NV-Embedv2 [30], which are state-of-the-art LLM-based embedding models; CLIP [47], a dual-encoder vision-language model; DSE [37] and VisRAG-Ret [66], which are state-of-the-art visual document retrieval models. The second category includes fine-tuned models trained on OpenDocVQA. To", + "bbox": [ + 511, + 719, + 908, + 901 + ], + "page_idx": 4 + }, + { + "type": "table", + "img_path": "images/d3a6583b9ab0f54d816ebe6f818bb357823c7fc0b4fbdeaae0b828e9de90fee4.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | Init | Docs | Scale | #PT | #FT | ChartQA Single | ChartQA All | SlideVQA Single | SlideVQA All | InfoVQA Single | InfoVQA All | DUDE Single | DUDE All
Off-the-shelf
BM25 [52] | - | Text | 0 | 0 | 0 | 54.8 | 15.6 | 40.7 | 38.7 | 50.2 | 31.3 | 57.2 | 47.5
Contriever [22] | BERT [12] | Text | 110M | 1B | 500K | 66.9 | 59.3 | 50.8 | 46.5 | 42.5 | 21.0 | 40.6 | 29.7
E5 [59] | BERT [12] | Text | 110M | 270M | 1M | 74.9 | 66.3 | 53.6 | 49.6 | 49.2 | 26.9 | 45.0 | 38.9
GTE [34] | BERT [12] | Text | 110M | 788M | 3M | 72.8 | 64.7 | 55.4 | 49.1 | 51.3 | 32.5 | 42.4 | 36.0
E5-Mistral [60] | Mistral [23] | Text | 7.1B | 0 | 1.85M | 72.3 | 70.0 | 63.8 | 57.6 | 60.3 | 33.9 | 52.2 | 45.2
NV-Embed-v2 [30] | Mistral [23] | Text | 7.9B | 0 | 2.46M | 75.3 | 70.7 | 61.7 | 58.1 | 56.5 | 34.2 | 43.0 | 38.6
CLIP [47] | Scratch | Image | 428M | 400M | 0 | 54.6 | 38.6 | 38.1 | 29.7 | 45.3 | 20.6 | 23.2 | 17.6
DSE [37] | Phi3V [1] | Image | 4.2B | 0 | 5.61M | 72.7 | 68.5 | 73.0 | 67.2 | 67.4 | 49.6 | 55.5 | 47.7
VisRAG-Ret [66] | MiniCPM-V [63] | Image | 3.4B | 0 | 240K | 87.2* | 75.5* | 74.3* | 68.4* | 71.9* | 51.7* | 56.4 | 44.5
Trained on OpenDocVQA
Phi3 [1] | Phi3V [1] | Text | 4B | 0 | 41K | 72.5 | 65.3 | 53.3 | 48.4 | 53.2* | 33.0* | 40.5* | 32.0*
VDocRetriever† | Phi3V [1] | Image | 4.2B | 0 | 41K | 84.2 (+11.7) | 74.8 (+9.5) | 71.0 (+17.7) | 65.1 (+16.7) | 66.8* (+13.6) | 52.8* (+19.8) | 48.4* (+7.9) | 41.0* (+9.0)
VDocRetriever | Phi3V [1] | Image | 4.2B | 500K | 41K | 86.0 (+1.8) | 76.4 (+1.6) | 77.3 (+6.3) | 73.3 (+8.2) | 72.9* (+6.1) | 55.5* (+2.7) | 57.7* (+9.3) | 50.9* (+9.9)
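The retrieval step scores a question against pre-computed document embeddings via maximum inner product search; a minimal sketch using the faiss library cited as [15] (not the authors' code) over L2-normalized <EOS> embeddings could look as follows.

# Minimal sketch: cosine-similarity top-k search with faiss over document embeddings.
import numpy as np
import faiss

def build_index(doc_embs: np.ndarray) -> faiss.IndexFlatIP:
    doc_embs = doc_embs.astype('float32')
    faiss.normalize_L2(doc_embs)              # cosine similarity == inner product
    index = faiss.IndexFlatIP(doc_embs.shape[1])
    index.add(doc_embs)
    return index

def search(index: faiss.IndexFlatIP, query_emb: np.ndarray, k: int = 5):
    q = query_emb.astype('float32').reshape(1, -1)
    faiss.normalize_L2(q)
    scores, ids = index.search(q, k)          # top-k document ids and similarities
    return ids[0], scores[0]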
", + "bbox": [ + 96, + 88, + 901, + 299 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/54efb87ac6dad28683019a443b5e882e7f23fa45ba4fe6d62ae166f575a26382.jpg", + "table_caption": [ + "Table 3. Retrieval results under the single- (Single) and all-pool (All) settings. * indicates performance on test data for which corresponding training samples are available. All other results represent zero-shot performance. Init, FT, and PT denote the initialization model, finetuning, and pre-training, respectively. Performance gains in green and blue are compared to the base LLM and VDocRetirver†, respectively." + ], + "table_footnote": [], + "table_body": "
Generator | Retriever | Docs | ChartQA Single | ChartQA All | SlideVQA Single | SlideVQA All | InfoVQA Single | InfoVQA All | DUDE Single | DUDE All
Closed-book
Phi3 | - | - | 20.0 | 20.0 | 20.3 | 20.3 | 34.9* | 34.9* | 23.1* | 23.1*
Text-based RAG
Phi3 | Phi3 | Text | 28.0 | 28.0 | 28.6 | 28.0 | 40.5* | 39.1* | 40.1* | 35.7*
Phi3 | Gold | Text | 36.6 | 36.6 | 27.8 | 27.8 | 45.6* | 45.6* | 55.9* | 55.9*
VDocRAG (Ours)
VDocGenerator | VDocRetriever | Image | 52.0 (+24.0) | 48.0 (+20.0) | 44.2 (+15.6) | 42.0 (+14.0) | 56.2* (+15.7) | 49.2* (+10.1) | 48.5* (+8.4) | 44.0* (+8.3)
VDocGenerator | Gold | Image | 74.0 | 74.0 | 56.4 | 56.4 | 64.6* | 64.6* | 66.4* | 66.4*
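ANLS, used for InfoVQA and DUDE in the evaluation-metrics paragraph below, scores a prediction by one minus the normalized edit distance to the closest gold answer, zeroed out above a threshold (commonly 0.5); a small reference sketch, not the official evaluation script:

# Hedged sketch of the ANLS metric.
def levenshtein(a: str, b: str) -> int:
    prev = list(range(len(b) + 1))
    for i, ca in enumerate(a, 1):
        cur = [i]
        for j, cb in enumerate(b, 1):
            cur.append(min(prev[j] + 1, cur[j - 1] + 1, prev[j - 1] + (ca != cb)))
        prev = cur
    return prev[-1]

def anls(prediction: str, gold_answers: list, threshold: float = 0.5) -> float:
    best = 0.0
    for gold in gold_answers:
        p, g = prediction.lower().strip(), gold.lower().strip()
        nld = levenshtein(p, g) / max(len(p), len(g), 1)
        best = max(best, 1.0 - nld if nld < threshold else 0.0)
    return best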
", + "bbox": [ + 145, + 364, + 854, + 510 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4. DocumentVQA results. All models are fine-tuned on OpenDocVQA. The results marked with * denote performance on unseen test samples, and the other results represent zero-shot performance. The performance gain in green is compared to the text-based RAG that has the same base LLM. Gold knows the ground-truth documents. Models answer the question based on the top three retrieval results.", + "bbox": [ + 88, + 521, + 906, + 565 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "verify the effectiveness of encoding documents through images, we fine-tuned the LLM in VDocRetriever (Phi3 [1]) using extracted text to represent documents. Additionally, we included a variant of VDocRetriever without pretraining (VDocRetriever†).", + "bbox": [ + 88, + 589, + 480, + 666 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "QA baselines. We compared VDocRAG against closed-book and text-based RAG models. These baselines used the same model initialization as VDocRAG but fine-tuned only the LLM (Phi3). The closed-book model received only the question as input, while the text-based RAG used the top three documents retrieved by the Phi3 retriever. Moreover, we assessed possible upper-bound performance by testing generation with ground-truth (Gold) documents.", + "bbox": [ + 88, + 684, + 482, + 806 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Evaluation metrics. We evaluated retrieval performance using nDCG@5, a widely used metric in information retrieval [17, 25]. For the DocumentVQA task, we followed the evaluation protocol of each dataset, we used ANLS [4] for InfoVQA and DUDE, Relaxed Accuracy [41] for", + "bbox": [ + 88, + 824, + 482, + 902 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "ChartQA, F1 for SlideVQA as evaluation metrics.", + "bbox": [ + 511, + 589, + 844, + 606 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.2. Retrieval Results", + "text_level": 1, + "bbox": [ + 511, + 616, + 681, + 631 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 3 shows that VDocRetriever† achieved significantly higher retrieval performance than the text-based Phi3 retriever on all datasets under the same conditions. This indicates that our model can effectively encode documents in image format for retrieval tasks. Furthermore, VDocRetriever exhibits superior zero-shot generalization on unseen datasets, ChartQA and SlideVQA, outperforming both off-the-shelf text retrievers and state-of-the-art visual document retrieval models. Notably, DSE was initialized with the same LVLM as ours and fine-tuned on 13.7 times more data. This highlights that our pre-training strategy and the OpenDocVQA dataset offer unique advantages that are not adequately addressed by existing approaches.", + "bbox": [ + 509, + 638, + 906, + 837 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "5.3. Retrieval-Augmented Generation Results", + "text_level": 1, + "bbox": [ + 511, + 847, + 867, + 864 + ], + "page_idx": 5 + }, + { + "type": "text", + "text": "Table 4 shows that VDocRAG significantly outperformed both the closed-book LLM and the text-based RAG on", + "bbox": [ + 511, + 869, + 906, + 900 + ], + "page_idx": 5 + }, + { + "type": "table", + "img_path": "images/daa3016ce5d5e7782cea565c8faa26b1c6efa12759c7e13273fa9d8ffc3e6863.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Model | SlideVQA | InfoVQA
VDocRetriever | 77.3 | 72.9
w/o RCR | 75.9 (-1.4) | 71.1 (-1.8)
w/o RCG | 71.7 (-5.6) | 68.8 (-4.1)
w/o RCG & RCR | 71.0 (-6.3) | 66.8 (-6.1)
w/o LLM & Projector (→ CLIP encoders) | 43.7 (-33.6) | 37.9 (-35.0)
", + "bbox": [ + 94, + 88, + 480, + 184 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/4e61e1c195bf6000753a2dd640be8401758f0293e8d421f6f33ad6103d78a7f0.jpg", + "table_caption": [ + "Table 5. Ablation study of our pre-training tasks and model architecture in the retrieval task under the single-pool setting." + ], + "table_footnote": [], + "table_body": "
Model | Retrieval SlideVQA | Retrieval InfoVQA | QA SlideVQA | QA InfoVQA
VDocRAG | 77.3 | 72.9 | 44.2 | 56.2
w/o MHDocVQA | 75.0 (-2.3) | 71.4 (-1.5) | 43.4 (-0.8) | 53.8 (-2.4)
w/o except MHDocVQA | 68.8 (-8.5) | 61.7 (-11.2) | 41.1 (-3.1) | 44.0 (-12.2)
", + "bbox": [ + 94, + 237, + 486, + 316 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/73871c05f45431f46c97eae932f29b3326fe511a2575664d872318fe1abcd46e.jpg", + "image_caption": [ + "(a) Retrieval performance", + "Figure 5. Performance under different document lengths on InfoVQA (single-pool setting)." + ], + "image_footnote": [], + "bbox": [ + 91, + 369, + 287, + 500 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/df449c705825c0ab0b8705e5bf0f02771596144cbceb6c4291677a1ba17ed475.jpg", + "image_caption": [ + "(b) QA performance" + ], + "image_footnote": [], + "bbox": [ + 292, + 371, + 483, + 500 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "the DocumentVQA task, even when all models were the same initialization. Additionally, when the retrieval results were fixed to ground-truth (Gold) documents, VDocRAG demonstrated superior performance to text-based RAG. This underscores the importance of visual cues in extracting answers from documents and suggests that VDocGenerator has a higher upper-bound performance. Both text-based RAG and VDocRAG exhibited substantial improvements when provided with ground-truth documents, highlighting potential areas for enhancing retrieval accuracy and improving the generator's robustness to retrieval noise.", + "bbox": [ + 89, + 593, + 483, + 760 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "5.4. Analysis", + "text_level": 1, + "bbox": [ + 89, + 771, + 192, + 787 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Can our pre-training tasks be beneficial? Table 5 shows that VDocRetriever outperformed the model without pretraining. Removing each pre-training task or both RCG and RCR tasks decreased performance, indicating that both tasks contribute complementarily. These validate that our pre-training effectively learns to compress image features while aligning them with textual contents in images.", + "bbox": [ + 89, + 794, + 483, + 902 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/d4e697531784a0bdf0197a13e8f684df42e426dd4eef7cbab43178197d461901.jpg", + "table_caption": [ + "Table 6. Ablation study of our dataset in retrieval and QA tasks under the single-pool setting." + ], + "table_footnote": [], + "table_body": "
Model | Retrieval (OCR) | Retrieval (Encoding) | QA (Generation) | Total
Text-based RAG (Phi3) | 590.0 | 70.7 | 422.7 | 1083.4
VDocRAG | - | 204.4 | 789.7 | 994.1
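The per-document times reported here are averages on a single A100; a typical way to measure such latencies with CUDA events is sketched below (an illustration, not the authors' timing harness), where `encode` is a placeholder for any per-document encoding or generation call.

# Sketch: average per-input GPU latency in milliseconds.
import torch

def avg_latency_ms(encode, inputs, warmup: int = 5) -> float:
    for x in inputs[:warmup]:                 # warm up kernels and caches
        encode(x)
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize()
    start.record()
    for x in inputs:
        encode(x)
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / len(inputs)   # milliseconds per input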
", + "bbox": [ + 540, + 88, + 882, + 157 + ], + "page_idx": 6 + }, + { + "type": "table", + "img_path": "images/cb5bfec95a153aac6c77d40d97d65aefdbbaadf610a541398d6e0b294385ba9e.jpg", + "table_caption": [ + "Table 7. Efficiency analysis on InfoVQA. The average time (ms) to encode a single document or generate a single answer is measured on a single A100 GPU." + ], + "table_footnote": [], + "table_body": "
Model | Retrieval SlideVQA | Retrieval InfoVQA | QA SlideVQA | QA InfoVQA
Text-based RAG (Llama3) | 60.1 | 61.8 | 37.8 | 49.5
VDocRAG (Idefics3) | 73.4 | 72.5 | 48.9 | 59.9
w/o Pre-train | 70.3 | 69.8 | 47.2 | 59.6
", + "bbox": [ + 517, + 224, + 903, + 306 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Table 8. Analysis with different LVLM (Idefics3) in retrieval and QA tasks under the single-pool setting.", + "bbox": [ + 511, + 316, + 906, + 345 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Does LLM help understanding document images? Table 5 shows that retrieval performance dropped substantially when the LLM block was removed, leaving only the CLIP text/vision encoder, even with the same visual transformer backbone. This suggests that LLM can capture finer-grained visual details and enhance semantic understanding.", + "bbox": [ + 511, + 373, + 906, + 465 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Does our dataset improve the performance? Table 6 shows that removing MHDocVQA caused a performance decrease, indicating that MHDocVQA requires distinct reasoning skills compared to other collected datasets in OpenDocVQA. Additionally, excluding all OpenDocVQA datasets except MHDocVQA led to a significant performance drop. This confirms that our collected datasets effectively supplement the missing capabilities of LVLM in document retrieval and understanding.", + "bbox": [ + 511, + 488, + 908, + 625 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "How well does VDocRAG perform under different document lengths? Figure 5 shows that VDocRAG consistently outperforms text-based RAG, indicating that VDocRAG can better understand documents through visual information. In general, we observed that the VDocRAG's relative performance over text-based RAG is larger for images with 0-10 words (+66.0 in retrieval, +21.1 in QA) than for those with 500+ words (+28.4 in retrieval, +16.7 in QA).", + "bbox": [ + 511, + 648, + 908, + 771 + ], + "page_idx": 6 + }, + { + "type": "text", + "text": "Is VDocRAG more efficient than text-based RAG? Table 7 shows that VDocRAG is more efficient than text-based RAG. Especially, VDocRAG requires $69\\%$ less inference time to retrieve documents than text-based RAG. Although VDocRetriever takes more time for document encoding and generation, it eliminates the time-consuming OCR processing necessary for text-based RAG.", + "bbox": [ + 511, + 794, + 908, + 902 + ], + "page_idx": 6 + }, + { + "type": "image", + "img_path": "images/e6b5f824fb272dd5a3145c39a1ecf37d59a1fd8717888828b2d0f0e211e90863.jpg", + "image_caption": [ + "Figure 6. Qualitative results of VDocRAG compared to text-based RAG." + ], + "image_footnote": [], + "bbox": [ + 94, + 89, + 903, + 318 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/957d84b5828bdd5a612e32c2651bbbda754f02c37807084e283aa9a19c30dc49.jpg", + "image_caption": [ + "(a) VDocRAG answers correctly, but Text-based RAG answers incorrectly" + ], + "image_footnote": [], + "bbox": [ + 101, + 366, + 267, + 487 + ], + "page_idx": 7 + }, + { + "type": "image", + "img_path": "images/150a4df0e8043a1b2230cf04914527d44abdde76e7a1e1f42228cdd4b0087919.jpg", + "image_caption": [ + "(b) VDocRAG answers incorrectly, but Text-based RAG answers correctly", + "Figure 7. Root causes of correct and incorrect predictions." + ], + "image_footnote": [], + "bbox": [ + 310, + 366, + 472, + 488 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Can our method apply different LVLMs? To investigate the impact of different LVLMs on VDocRAG, we replaced Phi3V with Idefics3 [29], a state-of-the-art LVLM that uses Llama3-8B [16] as its backbone LLM. 
As observed in Table 8, the performance trend was consistent with that of Phi3V, highlighting the versatility and broad applicability of our method.", + "bbox": [ + 89, + 571, + 482, + 676 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Qualitative results. Figure 6 illustrates the performance of our model through qualitative examples. In the top example, VDocRAG demonstrates strong performance on a question requiring multi-hop reasoning and graph understanding across multi-page slides. In the bottom example, VDocRAG also performs better on a question that requires parsing on the table with cells spanning multiple rows and columns. In contrast, text-based RAG depends solely on OCR text information, leading to a superficial understanding of the text and incorrect predictions.", + "bbox": [ + 89, + 698, + 483, + 849 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Human evaluation. To better understand the prediction differences between VDocRAG and text-based RAG, we", + "bbox": [ + 89, + 869, + 483, + 900 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "manually analyzed the generated outputs by identifying the root causes of 50 correct and 50 incorrect predictions, randomly sampled from test samples. Figure 7a shows that VDocRAG significantly enhances the understanding of visual data (e.g., charts). Conversely, Figure 7b reveals that VDocRAG encounters challenges with text-heavy documents (e.g., books), primarily due to the OCR capabilities. We observed that text-based RAG correctly answers questions when visual data includes long titles or subtitles, which have a high textual overlap with the question. These observations are in line with the results shown in Figure 5.", + "bbox": [ + 511, + 369, + 906, + 535 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "6. Conclusion", + "text_level": 1, + "bbox": [ + 511, + 550, + 633, + 566 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "We introduced a new RAG framework, VDocRAG, which can directly understand various real-world documents. We enhanced VDocRAG with two key contributions: (1) pretraining tasks capable of learning image representation efficiently by leveraging the powerful capabilities of LVLMs, and (2) OpenDocVQA, the first unified open-domain DocumentVQA dataset that encompasses a wide range of visually-rich documents. Our holistic evaluations on four datasets show that VDocRAG significantly outperformed conventional text-based RAG, shedding light on the development of an effective RAG over real-world documents.", + "bbox": [ + 511, + 575, + 906, + 743 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "Limitations. While we focused on pre-training to align images and OCR data for document retrieval, leveraging caption data instead of OCR data offers the potential for retrieving images that do not contain text. Moreover, this study did not address reducing the computational cost of creating search indexes for extensive image collections. We plan to reduce the cost of VDocRAG using more efficient techniques. 
Lastly, joint training of QA and retrieval components simultaneously further optimizes their interactions.", + "bbox": [ + 511, + 763, + 908, + 902 + ], + "page_idx": 7 + }, + { + "type": "text", + "text": "References", + "text_level": 1, + "bbox": [ + 91, + 89, + 187, + 104 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[1] Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv:2404.14219, 2024. 2, 5, 6, 3", + "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. GPT-4 technical report. arXiv:2303.08774, 2023. 1, 3", + "[3] Akari Asai, Sewon Min, Zexuan Zhong, and Danqi Chen. Retrieval-based language models and applications. In ACL, pages 41-46, 2023. 2", + "[4] Ali Furkan Biten, Rubén Tito, Andrés Mafla, Lluis Gómez i Bigorda, Marçal Rusinol, C. V. Jawahar, Ernest Valveny, and Dimosthenis Karatzas. Scene text visual question answering. In ICCV, pages 4290-4300, 2019. 6", + "[5] Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George Bm Van Den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. Improving language models by retrieving from trillions of tokens. In ICML, pages 2206-2240, 2022. 2", + "[6] Minwoo Byeon, Beomhee Park, Haecheon Kim, Sungjun Lee, Woonhyuk Baek, and Saehoon Kim. Coyo-700m: Image-text pair dataset. https://github.com/kakaobrain/coyo-dataset, 2022.3", + "[7] Jingwen Chen, Yingwei Pan, Yehao Li, Ting Yao, Hongyang Chao, and Tao Mei. Retrieval augmented convolutional encoder-decoder networks for video captioning. TOMCCAP, pages 1-24, 2023. 2", + "[8] Wenhu Chen, Hexiang Hu, Chitwan Sahara, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. arXiv:2209.14491, 2022. 2", + "[9] Jaemin Cho, Debanjan Mahata, Ozan Irsoy, Yujie He, and Mohit Bansal. M3DocRAG: Multi-modal retrieval is what you need for multi-page multi-document understanding. arXiv:2411.04952, 2024. 2", + "[10] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. InstructBLIP: Towards general-purpose vision-language models with instruction tuning. arXiv:2305.06500, 2023. 2", + "[11] Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. FlashAttention: Fast and memory-efficient exact attention with io-awareness. In NeurIPS, pages 16344-16359, 2022. 5", + "[12] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In *NAACL-HLT*, pages 4171–4186, 2019. 5, 6", + "[13] Kuicai Dong, Yujing Chang, Xin Deik Goh, Dexun Li, Ruiming Tang, and Yong Liu. MMDocIR: Benchmarking multi-modal retrieval for long documents. arXiv:2501.08828, 2025. 2", + "[14] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Songyang Zhang, Haodong Duan," + ], + "bbox": [ + 93, + 114, + 480, + 901 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "Wenwei Zhang, Yining Li, et al. Internlm-xcomposer2-4khd: A pioneering large vision-language model handling resolutions from 336 pixels to 4k hd. arXiv:2404.06512, 2024. 
4", + "[15] Matthijs Douze, Alexandr Guzhva, Chengqi Deng, Jeff Johnson, Gergely Szilvasy, Pierre-Emmanuel Mazaré, Maria Lomeli, Lucas Hosseini, and Hervé Jégou. The faiss library. arXiv:2401.08281, 2024. 4", + "[16] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv:2407.21783, 2024. 1, 8", + "[17] Manuel Faysse, Hugues Sibille, Tony Wu, Gautier Vi-aud, Céline Hudelot, and Pierre Colombo. ColPali: Efficient document retrieval with vision language models. arXiv:2407.01449, 2024. 1, 2, 3, 6", + "[18] Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. Retrieval augmented language model pretraining. In ICML, pages 3929-3938, 2020. 1", + "[19] Matthew Honnibal and Ines Montani. spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing. To appear, 2017. 3", + "[20] Anwen Hu, Haiyang Xu, Jiabo Ye, Ming Yan, Liang Zhang, Bo Zhang, Chen Li, Ji Zhang, Qin Jin, Fei Huang, et al. nplug-docowl 1.5: Unified structure learning forOCR-free document understanding. arXiv:2403.12895, 2024. 5", + "[21] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. arXiv:2106.09685, 2021. 5", + "[22] Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. Unsupervised dense information retrieval with contrastive learning. arXiv:2112.09118, 2021. 5, 6, 3", + "[23] Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b. arXiv:2310.06825, 2023. 6", + "[24] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv:2401.04088, 2024. 1, 3", + "[25] Ehsan Kamalloo, Nandan Thakur, Carlos Lassance, Xueguang Ma, Jheng-Hong Yang, and Jimmy Lin. Resources for brewing heir: Reproducible reference models and an official leaderboard, 2023. 6", + "[26] Yuma Koizumi, Yasunori Ohishi, Daisuke Niizumi, Daiki Takeuchi, and Masahiro Yasuda. Audio captioning using pre-trained large-scale language model guided by audiobased similar caption retrieval. arXiv:2012.07331, 2020. 2", + "[27] Sunjun Kweon, Yeonsu Kwon, Seonhee Cho, Yohan Jo, and Edward Choi. Open-WikiTable: Dataset for open domain question answering with complex reasoning over table. In Findings of ACL, pages 8285-8297, 2023. 3, 5, 1" + ], + "bbox": [ + 516, + 92, + 903, + 900 + ], + "page_idx": 8 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[28] Jordy Landeghem, Rubén Tito, Łukasz Borchmann, Michal Pietruszka, Paweł Józiak, Rafał Powalski, Dawid Jurkiewicz, Mickaël Coustaty, Bertrand Ackaert, Ernest Valveny, et al. Document understanding dataset and evaluation (dude). In ICCV, pages 19528-19540, 2023. 2, 3, 5, 1", + "[29] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. arXiv:2408.12637, 2024. 
2, 8", + "[30] Chankyu Lee, Rajarshi Roy, Mengyao Xu, Jonathan Raiman, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. NvEmbed: Improved techniques for training llms as generalist embedding models. arXiv:2405.17428, 2024. 5, 6, 3", + "[31] Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. In NIPS, pages 9459-9474, 2020. 1", + "[32] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In ICML, pages 12888-12900, 2022. 2", + "[33] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, pages 19730–19742, 2023. 2", + "[34] Zehan Li, Xin Zhang, Yanzhao Zhang, Dingkun Long, Pengjun Xie, and Meishan Zhang. Towards general text embeddings with multi-stage contrastive learning. arXiv:2308.03281, 2023. 5, 6", + "[35] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv:2304.08485, 2023. 2", + "[36] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv:1711.05101, 2017. 5", + "[37] Xueguang Ma, Sheng-Chieh Lin, Minghan Li, Wenhu Chen, and Jimmy Lin. Unifying multimodal retrieval via document screenshot embedding. arXiv:2406.11251, 2024. 1, 2, 5, 6, 3", + "[38] Xueguang Ma, Shengyao Zhuang, Bevan Koopman, Guido Zuccon, Wenhu Chen, and Jimmy Lin. VISA: Retrieval augmented generation with visual source attribution. arXiv:2412.14457, 2024. 2", + "[39] Seiji Maekawa, Hayate Iso, Sairam Gurajada, and Nikita Bhutani. Retrieval helps or hurts? a deeper dive into the efficacy of retrieval augmentation to language models. In NAACL, pages 5506-5521, 2024. 1, 2", + "[40] Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In ACL, pages 9802-9822, 2023. 1, 2", + "[41] Ahmed Masry, Xuan Long Do, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. ChartQA: A benchmark for question answering about charts with visual and logical reasoning. In Findings of ACL, pages 2263-2279, 2022. 2, 3, 5, 6, 1", + "[42] Minesh Mathew, Dimosthenis Karatzas, and C. V. Jawahar. DocVQA: A dataset for vqa on document images. In WACV, pages 2200-2209, 2021. 1, 2, 5" + ], + "bbox": [ + 91, + 92, + 480, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[43] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and C.V. Jawahar. InfographicVQA. In WACV, pages 1697-1706, 2022. 1, 2, 3, 5", + "[44] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv:1807.03748, 2018. 4", + "[45] Md Rizwan Parvez, Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. Retrieval augmented code generation and summarization. arXiv:2108.11601, 2021. 2", + "[46] Le Qi, Shangwen Lv, Hongyu Li, Jing Liu, Yu Zhang, Qiaoqiao She, Hua Wu, Haifeng Wang, and Ting Liu. DuReadervis: A Chinese dataset for open-domain document visual question answering. In Findings of ACL, pages 1338-1351, 2022. 
2, 3", + "[47] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 5, 6", + "[48] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. JMLR, 21(140):1-67, 2020. 2", + "[49] Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. Incontext retrieval-augmented language models. TACL, pages 1316-1331, 2023. 2", + "[50] Rita Ramos, Desmond Elliott, and Bruno Martins. Retrievalaugmented image captioning. In EACL, pages 3666-3681, 2023. 2", + "[51] Rita Ramos, Bruno Martins, Desmond Elliott, and Yova Kementchedjhieva. Smallcap: lightweight image captioning prompted with retrieval augmentation. In CVPR, pages 2840-2849, 2023. 2", + "[52] Stephen Robertson, Hugo Zaragoza, et al. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389, 2009. 5, 6", + "[53] Junyoung Seo, Susung Hong, Wooseok Jang, Ines Hyeonsu Kim, Minseop Kwak, Doyup Lee, and Seungryong Kim. Retrieval-augmented score distillation for text-to-3d generation. arXiv:2402.02972, 2024. 2", + "[54] Ray Smith. An overview of the tesseractOCR engine. In ICDAR, pages 629-633, 2007. 5", + "[55] Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv:2210.09261, 2022. 1", + "[56] Ryota Tanaka, Kyosuke Nishida, and Sen Yoshida. VisualMRC: Machine reading comprehension on document images. In AAAI, pages 13878-13888, 2021. 1, 2, 3, 5", + "[57] Ryota Tanaka, Kyosuke Nishida, Kosuke Nishida, Taku Hasegawa, Itsumi Saito, and Kuniko Saito. SlideVQA: A dataset for document visual question answering on multiple images. In AAAI, pages 13636-13645, 2023. 1, 2, 3, 5" + ], + "bbox": [ + 516, + 92, + 903, + 900 + ], + "page_idx": 9 + }, + { + "type": "list", + "sub_type": "ref_text", + "list_items": [ + "[58] Ryota Tanaka, Taichi Iki, Kyosuke Nishida, Kuniko Saito, and Jun Suzuki. Instructdoc: A dataset for zero-shot generalization of visual document understanding with instructions. In AAAI, pages 19071-19079, 2024. 2", + "[59] Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. Text embeddings by weakly-supervised contrastive pretraining. arXiv:2212.03533, 2022. 5, 6", + "[60] Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. Improving text embeddings with large language models. In ACL, pages 11897-11916, 2024. 5, 6, 3", + "[61] Jilan Xu, Yifei Huang, Junlin Hou, Guo Chen, Yuejie Zhang, Rui Feng, and Weidi Xie. Retrieval-augmented egocentric video captioning. In CVPR, pages 13525-13536, 2024. 2", + "[62] Dongchao Yang, Songxiang Liu, Rongjie Huang, Chao Weng, and Helen Meng. Instructtts: Modelling expressive tts in discrete latent space with natural language style prompt. TASLP, pages 2913-2925, 2024. 
2", + "[63] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, Qianyu Chen, Huarong Zhou, Zhensheng Zou, Haoye Zhang, Shengding Hu, Zhi Zheng, Jie Zhou, Jie Cai, Xu Han, Guoyang Zeng, Dahai Li, Zhiyuan Liu, and Maosong Sun. Minicpm-v: A gpt-4v level mllm on your phone. arXiv:2408.01800, 2024. 6", + "[64] Michihiro Yasunaga, Armen Aghajanyan, Weijia Shi, Rich James, Jure Leskovec, Percy Liang, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. Retrieval-augmented multimodal language modeling. In ICML, pages 39755-39769, 2023. 2", + "[65] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Guohai Xu, Chenliang Li, Junfeng Tian, Qi Qian, Ji Zhang, Qin Jin, Liang He, Xin Lin, and Fei Huang. UReader: Universal OCR-free visually-situated language understanding with multimodal large language model. In EMNLP Findings, pages 2841-2858, 2023. 4", + "[66] Shi Yu, Chaoyue Tang, Bokai Xu, Junbo Cui, Junhao Ran, Yukun Yan, Zhenghao Liu, Shuo Wang, Xu Han, Zhiyuan Liu, et al. VisRAG: Vision-based retrieval-augmented generation on multi-modality documents. arXiv:2410.10594, 2024. 2, 5, 6", + "[67] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, pages 11975-11986, 2023. 2", + "[68] Liang Zhang, Anwen Hu, Jing Zhang, Shuo Hu, and Qin Jin. MPMQA: multimodal question answering on product manuals. In AAAI, pages 13958-13966, 2023. 2, 3, 5, 1", + "[69] Mingyuan Zhang, Xinying Guo, Liang Pan, Zhongang Cai, Fangzhou Hong, Huirong Li, Lei Yang, and Ziwei Liu. Remodiffuse: Retrieval-augmented motion diffusion model. In ICCV, pages 364-373, 2023. 2", + "[70] Shuyan Zhou, Uri Alon, Frank F Xu, Zhiruo Wang, Zhengbao Jiang, and Graham Neubig. Docprompting: Generating code by retrieving the docs. arXiv:2207.05987, 2022. 2", + "[71] Fengbin Zhu, Wenqiang Lei, Fuli Feng, Chao Wang, Haozhou Zhang, and Tat-Seng Chua. Towards complex doc" + ], + "bbox": [ + 91, + 90, + 480, + 900 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "ument understanding by discrete reasoning. In ACMM, pages 4857-4866, 2022. 2", + "bbox": [ + 545, + 90, + 903, + 119 + ], + "page_idx": 10 + }, + { + "type": "text", + "text": "Supplementary Material", + "text_level": 1, + "bbox": [ + 380, + 131, + 614, + 152 + ], + "page_idx": 11 + }, + { + "type": "table", + "img_path": "images/e9f6bab9c5e222d059bdff8614152323cb97a9ba24e7c9b7f1263981d7f9fd2b.jpg", + "table_caption": [], + "table_footnote": [ + "Table A. Main statistics in OpenDocVQA." + ], + "table_body": "
Statistics | Number
Total Images | 206,267
Total Questions | 43,474
- Single-Hop Questions | 33,244 (76.5%)
- Multi-Hop Questions | 10,230 (23.5%)
- Extractive Answer | 19,797 (45.5%)
- Abstractive Answer | 23,677 (54.5%)
QA Source Datasets | 9
- Existing DocumentVQA Datasets | 7
- Existing TableQA Datasets | 1
- Our Newly Created Datasets | 1
Maximum Question Length | 58
Maximum Answer Length | 130
Average Question Length | 13.7
Average Answer Length | 3.7
", + "bbox": [ + 125, + 166, + 447, + 409 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/7e49aaf1f1323c641e43f8db00064972159c2186eda489691bd009dc1116d050.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 573, + 166, + 846, + 273 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/f73482c9c890084652a720744d7d93804bedd1871b4d6c21d20525e9e5aba77b.jpg", + "image_caption": [ + "(a) Word cloud of questions.", + "(b) Word cloud of answers." + ], + "image_footnote": [], + "bbox": [ + 573, + 287, + 844, + 396 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "A. OpenDocVQA Details", + "text_level": 1, + "bbox": [ + 89, + 463, + 303, + 479 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Dataset Statistics. The main statistics of OpenDocVQA are presented in Table A. There are two types of questions: single-hop (45.5%) and multi-hop (23.5%). Answers to questions are categorized as extractive (45.5%) and abstractive (54.5%) types. OpenDocVQA consists of nine open-domain DocumentVQA datasets, including a newly created MHDocVQA dataset to address multi-hop questions over multiple documents, and collected and filtered QA datasets as follows.", + "bbox": [ + 89, + 491, + 483, + 625 + ], + "page_idx": 11 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- DocVQA [42] includes industry document images collected from the UCSF Industry Document Library.", + "- InfoVQA [43] includes infographics downloaded from the Internet for the search query \"infographics\".", + "- VisualMRC [56] is a visual machine reading comprehension on webpage screenshot images.", + "ChartQA [41] is a chart understanding dataset with human-written and machine-generated questions focusing on visual and logical reasoning.", + "- OpenWikiTable [27] is an open-domain question answering over tables. We took screenshot images of the tables, converting them into images with complex text layouts to handle visually-rich table data.", + "- DUDE [28] is a multi-page, multi-domain, and multi-industry QA dataset that requires processing long documents and understanding different types of documents.", + "- MPMQA [68] requires comprehending multimodal content in an entire product manual and answering questions." + ], + "bbox": [ + 89, + 628, + 482, + 901 + ], + "page_idx": 11 + }, + { + "type": "image", + "img_path": "images/53209fd369b7adc5051e928ef74d2ca73a849e33e79fbfde065f67f13f855ff2.jpg", + "image_caption": [ + "Figure A. Word cloud distributions of question and answer texts.", + "Figure B. Distribution of first three words of the question." + ], + "image_footnote": [], + "bbox": [ + 586, + 450, + 859, + 679 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "- SlideVQA [57] requires multi-hop reasoning over multiple slide images containing various text formats, layouts, and visual content such as plots and charts.", + "bbox": [ + 511, + 729, + 903, + 775 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Figure A presents word clouds of the most frequently appeared words in the question and answer texts, illustrating that OpenDocVQA covers a wide range of topics and words. This observation is further supported by Figure B, which is a sunburst of the first three words of the questions.", + "bbox": [ + 511, + 776, + 903, + 852 + ], + "page_idx": 11 + }, + { + "type": "text", + "text": "Filtering DocumentVQA datasets. 
We applied the following five heuristic rules to automatically filter out likely", + "bbox": [ + 511, + 869, + 903, + 901 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "#", + "bbox": [ + 99, + 88, + 132, + 116 + ], + "page_idx": 11 + }, + { + "type": "header", + "text": "VDocRAG: Retrieval-Augmented Generation over Visually-Rich Documents", + "bbox": [ + 133, + 99, + 897, + 119 + ], + "page_idx": 11 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Multi-hop Question Generation Prompt" + ], + "code_body": "EXAMPLE1: \nquestion1: In which country is the GWP smallest? \nanswer1: Denmark \nquestion2: What is the staple diet of Denmark? \nanswer2: Fish, cheese \ncombined question: What is the staple diet of the country where the GWP is the smallest? \nEXAMPLE2: \nquestion1: To which League does Chicago Cubs belong? \nanswer1: MLB \nquestion2: What is the average MLB team value? \nanswer2: $1.5b \ncombined question: What is the average the league where Chicago Cubs belongs to team value? \nEXAMPLE3 \nquestion1: Which is the capital city of Germany? \nanswer1: Berlin \nquestion2: What year did Berlin host the OKFestival? \nanswer2: It's 2014. \ncombined question: What year did the capital city of Germany host the OKFestival? \nBased on the above 3 examples, provide a combined question for the following case, such that the answer to the combined question is the same as the answer2: \nquestion1: {single-hop question} \nanswer1: {single-hop answer} \nquestion2: {single-hop question} \nanswer2: {single-hop answer} \ncombined question:", + "guess_lang": "txt", + "bbox": [ + 114, + 118, + 833, + 455 + ], + "page_idx": 12 + }, + { + "type": "code", + "sub_type": "code", + "code_caption": [ + "Table B. Multi-hop question generation prompt. “{single-hop question}” and “{single-hop answer}” are placeholders of two single-hop questions.", + "Multi-hop Question Filtering Prompt", + "Table C. Multi-hop question filtering prompt. “{single-hop question}” and “{single-hop answer}” are placeholders of two single-hop questions. “{multi-hop question}” denotes the generated multi-hop questions." + ], + "code_body": "question1: {single-hop question} \nanswer1: {single-hop answer} \nquestion2: {single-hop question} \nanswer2: {single-hop answer} \nBased on the questions and answers above, please answer the following question shortly. If the answer is not identified, the answer is 'None': {multi-hop question}", + "guess_lang": "txt", + "bbox": [ + 114, + 556, + 800, + 642 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "context-dependent questions:", + "bbox": [ + 89, + 726, + 285, + 739 + ], + "page_idx": 12 + }, + { + "type": "list", + "sub_type": "text", + "list_items": [ + "- The question has one or more demonstrative pronouns, including \"this\", \"these\", and \"those\".", + "- The question has one or more personal pronouns, including \"she\", \"he\", \"her\", \"his\", and \"him\".", + "- The question has one or more specific keywords, including \"the document\" and \"mention\".", + "- The question does not contain entities except for numbers.", + "- The question is shorter than six words." + ], + "bbox": [ + 89, + 744, + 482, + 866 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Any samples matching at least one of these rules were removed from our dataset. 
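For illustration, the five rules could be implemented roughly as below; using spaCy NER for the "no entities except numbers" rule is an assumption of this sketch, not a tooling detail stated in the paper.

# Illustrative implementation of the five heuristic filtering rules listed above.
import spacy

nlp = spacy.load('en_core_web_sm')
DEMONSTRATIVES = {'this', 'these', 'those'}
PERSONAL = {'she', 'he', 'her', 'his', 'him'}
KEYWORDS = ('the document', 'mention')

def is_likely_context_dependent(question: str) -> bool:
    tokens = {t.lower().strip('?,.') for t in question.split()}
    doc = nlp(question)
    non_numeric_entities = [
        e for e in doc.ents
        if e.label_ not in ('CARDINAL', 'ORDINAL', 'PERCENT', 'QUANTITY')
    ]
    return (
        bool(tokens & DEMONSTRATIVES)
        or bool(tokens & PERSONAL)
        or any(k in question.lower() for k in KEYWORDS)
        or len(non_numeric_entities) == 0
        or len(question.split()) < 6
    )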
After applying the rules, we", + "bbox": [ + 89, + 869, + 482, + 901 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "manually reviewed all the questions to ensure context-independence, guided by the instruction: \"When you see the question without a given document, can you find a unique document in the corpus to provide a unique answer?\" To validate our review, we randomly sampled 50 questions with their gold and top-5 retrieved documents (from VDocRetriever) and found no ambiguous cases, confirming the high quality of our process.", + "bbox": [ + 511, + 726, + 906, + 847 + ], + "page_idx": 12 + }, + { + "type": "text", + "text": "Prompts for creating multi-hop questions. Table B shows the prompt for combining two single-hop questions", + "bbox": [ + 511, + 869, + 906, + 901 + ], + "page_idx": 12 + }, + { + "type": "table", + "img_path": "images/44bd8baf3943678355dc4b467e0811d9b27915bb0fc9814308e0a335c81d698f.jpg", + "table_caption": [], + "table_footnote": [], + "table_body": "
Dataset | Task Description
DocVQA | You have to find an industry document that answers my question.
InfoVQA | Given a question, retrieve an infographic to answer the question.
VisualMRC | I'm looking for a screenshot image that answers the question.
ChartQA | Given a user query, retrieve a chart image that answers the query.
OpenWikiTable | Given a user query, retrieve a table image for answering the question.
DUDE | You need to retrieve evidence from a PDF page to address the question.
MPMQA | I want to know the answer to the question. Can you find evidence from manual pages?
SlideVQA | Given a question, retrieve a slide image to answer the question.
MHDocVQA | Given a multihop-question, retrieve multiple pages that can help answer the question.
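As described in Appendix B, these task descriptions are inserted into the query-side template "Instruct: {task description} \n Query: {question}" for LLM-based retrievers only; a trivial formatting sketch (two descriptions shown for brevity):

# Sketch of the query-side instruction template, using Table D task descriptions.
TASK_DESCRIPTIONS = {
    'InfoVQA': 'Given a question, retrieve an infographic to answer the question.',
    'SlideVQA': 'Given a question, retrieve a slide image to answer the question.',
}

def format_retrieval_query(dataset: str, question: str) -> str:
    # Applied only to LLM-based retrievers, per the paper's preliminary findings.
    return f'Instruct: {TASK_DESCRIPTIONS[dataset]}\nQuery: {question}'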
", + "bbox": [ + 189, + 88, + 807, + 241 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/a670a379606925aebe13fb798758eded19703da2df393c27de2a6123d0706960.jpg", + "table_caption": [ + "Table D. Instructions in the visual document retrieval task." + ], + "table_footnote": [], + "table_body": "
ModelModel Checkpoint
Contrieverfacebook/contriever-msmarco
E5intfloat/e5-base-v2
GTEthenlper/gte-base
E5-Mistralintfloat/e5-mistral-7b-instruct
NV-Embed-v2nvidia/NV-Embed-v2
CLIPopenai/clip-vit-large-patch14-336
DSETevatron/dse-phi3-docmatix-v1
VisRAG-Retopenmbv/VisRAG-Ret
Phi3Vmicrosoft/Phi-3-vision-128k-instruct
Idefics3HuggingFaceM4/Idefics3-8B-Llama3
", + "bbox": [ + 99, + 290, + 475, + 440 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/5090daed0843b128ed56ee5afc1f886125beab326b643bf72d7df2aaafa12051.jpg", + "table_caption": [ + "Table E. Model checkpoints stored on HuggingFace." + ], + "table_footnote": [], + "table_body": "
HyperparametersValue
Learning Rate1e-4
Gradient Accumulation4
Adam W β10.9
Adam W β20.999
LoRA Attention Dimension r8
LoRA Scaling Alpha64
LoRA Dropout0.1
LoRA Target*.proj
BF16True
", + "bbox": [ + 181, + 477, + 393, + 619 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "to generate multi-hop questions. Moreover, Table C shows the prompt for filtering the generated multi-hop questions.", + "bbox": [ + 89, + 669, + 482, + 702 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "B. Experimental Details", + "text_level": 1, + "bbox": [ + 89, + 713, + 295, + 729 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Instruction templates. Following a standard LLM-based retrieval training and evaluation strategy [60], we applied natural language instruction templates to the original question for the visual document retrieval task:", + "bbox": [ + 89, + 739, + 482, + 799 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Instruct: {task description} \\n Query: {question},", + "bbox": [ + 119, + 811, + 452, + 829 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "where “{task description}” is a placeholder for a one-sentence task description as shown in Table D. Note that the instruction format was applied to only LLM-based retrievers, including E5-Mistral [60], NV-Embed-v2 [30],", + "bbox": [ + 89, + 839, + 483, + 902 + ], + "page_idx": 13 + }, + { + "type": "table", + "img_path": "images/2c78af87a7f5b90dd0fafe9014a23fa580f89c137511a187dc4e21ee04dd4a3a.jpg", + "table_caption": [ + "Table F. Hyperparameters used for pre-training and fine-tuning." + ], + "table_footnote": [], + "table_body": "
Max Image ResolutionRetrievalANLSQA Generation Time
nDCG@5Encoding Time
336×33628.785.037.2394.5
672×67272.8106.442.7490.9
1344×134472.9204.456.2789.7
", + "bbox": [ + 517, + 290, + 903, + 372 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Table G. Impact of image resolution on InfoVQA under the single-pool setting. Average time (ms) to encode a single document or generate a single answer is measured on a single A100 GPU.", + "bbox": [ + 511, + 382, + 906, + 426 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "DSE [37], Phi3 [1], and VDocRetriever. Our preliminary experiments observed that using the instruction during both training and evaluation improved the performance of LLM-based retrievers. However, applying the same instruction format to non-LLM-based retrievers, such as Contriever [22], resulted in a performance decline due to lacking instruction-following capabilities. Furthermore, we appended an instruction regarding the desired output format for the DocumentVQA task:", + "bbox": [ + 511, + 450, + 908, + 589 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "$\\backslash$ n Answer briefly.", + "bbox": [ + 645, + 599, + 772, + 617 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Model checkpoints Table E shows model initialization checkpoints stored on HuggingFace1.", + "bbox": [ + 511, + 635, + 906, + 667 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "Model hyperparameters Table F lists hyperparameters in pre-training and fine-tuning used for our models.", + "bbox": [ + 511, + 685, + 905, + 715 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "C. Additional Experimental Analysis", + "text_level": 1, + "bbox": [ + 511, + 729, + 826, + 748 + ], + "page_idx": 13 + }, + { + "type": "text", + "text": "How does image resolution impact performance? Table G shows that increasing image resolution improved the model's capability to understand and encode the document; however, it also significantly increased the inference time for both retrieval and QA tasks. Moreover, the performance in the QA task exhibited greater sensitivity to image resolution compared to the retrieval task, indicating that the QA task demands more detailed visual understanding.", + "bbox": [ + 511, + 755, + 906, + 878 + ], + "page_idx": 13 + }, + { + "type": "page_footnote", + "text": "1https://huggingface.co", + "bbox": [ + 531, + 886, + 715, + 900 + ], + "page_idx": 13 + }, + { + "type": "image", + "img_path": "images/c3e1f531e3d74c264ced8dd2963260bf24946c515cb9e7bea31a89d5669a150c.jpg", + "image_caption": [ + "Figure C. QA performance with various top-k on InfoVQA under the single-pool setting. () denotes document sources." + ], + "image_footnote": [], + "bbox": [ + 102, + 89, + 472, + 232 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "How many retrieved documents to augment? Figure C shows that incorporating three documents yielded the best results in VDocRAG. While adding a few documents may include helpful contexts, adding more low-ranked or randomly sampled documents introduces noise and deteriorates generation due to the imperfections of retrievers.", + "bbox": [ + 89, + 297, + 482, + 388 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "Additional qualitative results. Figure D shows qualitative results of VDocRAG compared to text-based RAG. VDocRAG demonstrates significant performance advantages in understanding layouts and visual content, such as tables, charts, figures, and diagrams. 
These findings highlight the critical role of representing documents as images to improve the performance of the RAG framework.", + "bbox": [ + 89, + 407, + 482, + 513 + ], + "page_idx": 14 + }, + { + "type": "text", + "text": "VDocRetriever", + "text_level": 1, + "bbox": [ + 372, + 152, + 452, + 162 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "How many apps does the company which makes Clash of Clans make?", + "bbox": [ + 107, + 176, + 263, + 196 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Ground-truth: 7", + "bbox": [ + 112, + 219, + 189, + 229 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Text-based RAG: 61", + "bbox": [ + 112, + 238, + 207, + 248 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/f896e0eb27f884677490aa86ef18df4953d23755fa8e44a9ccdc08bc604d0195.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 220, + 236, + 236, + 247 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "VDocRAG: 7", + "bbox": [ + 112, + 253, + 176, + 265 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/8fda62ac62030b06a549a2ac78a33236c21b558754f50098b167c4b3d99054c7.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 220, + 253, + 236, + 266 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/02e19ae4e950e2108d7311580eeef92b303a0b15c955b13f5bb0d1de7673722c.jpg", + "image_caption": [ + "Top1" + ], + "image_footnote": [], + "bbox": [ + 264, + 189, + 400, + 272 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/fd6d7dd7029c1a3057e13b3b9406c6fc9423887c3cc2e59a07e224f78adf5288.jpg", + "image_caption": [ + "Top2" + ], + "image_footnote": [], + "bbox": [ + 413, + 189, + 558, + 273 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Text-based Retriever", + "text_level": 1, + "bbox": [ + 679, + 151, + 787, + 162 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Top1", + "bbox": [ + 640, + 175, + 661, + 186 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/4b0b054b065675e8636c73ddf038537162d7f7281b521ad2886aeb9652aafcca.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 666, + 175, + 681, + 186 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/b0bd7c2f12f9f023e7bf5933be336cfa80c05bb664a078dd8f3292b12b05d72a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 589, + 188, + 727, + 271 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Top2", + "bbox": [ + 790, + 175, + 812, + 186 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/10b565d1b28db9a28db2f8edc5bebe2760ff7ad373804320019c8e37cb788e11.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 815, + 175, + 834, + 186 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Top Free iOS App Earners", + "bbox": [ + 764, + 195, + 864, + 205 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/60d6ea6ed65da4afd72cffe5fb60f023b4f9af87271ab1f9836404bcc380b616.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 766, + 207, + 888, + 271 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "What is the Stream Source for the API which uses Java, Scala, and Python?", + "bbox": [ + 109, + 306, + 250, + 337 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Ground-truth: HDFS, Network", + "bbox": [ + 116, + 348, + 259, + 359 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Text-based RAG: Fink", + "bbox": [ + 
116, + 367, + 220, + 378 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/98ad0e6fc57ecfc3f03f5748718f4eb42c2e8f72d2583e53a9f04f4ec36cceff.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 241, + 367, + 259, + 378 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "VDocRAG: HDFS, Network", + "bbox": [ + 116, + 383, + 243, + 393 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/7c3dc62a709a7e1b74f98d1eda8a376a4f7c332b6cdeb2a3d34e613e614345b9.jpg", + "image_caption": [ + "Top1" + ], + "image_footnote": [], + "bbox": [ + 264, + 316, + 418, + 407 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/38ac7060dba69efc014f7182af952de71ee03e6a3f3ae00d2e81ed36c512e0ad.jpg", + "image_caption": [ + "Top2" + ], + "image_footnote": [], + "bbox": [ + 419, + 316, + 568, + 407 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/36edfad1b65572802a919a3b8ea162fe7b27fd4468e7ccac548538547b9904d2.jpg", + "image_caption": [ + "Top2" + ], + "image_footnote": [], + "bbox": [ + 673, + 300, + 692, + 311 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/8fd4c6619a027e8b10e7f28376bbaad6aa53a02853b4a8d1c27d64b6a014d0e8.jpg", + "image_caption": [ + "Top1" + ], + "image_footnote": [], + "bbox": [ + 588, + 314, + 733, + 404 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "The Reactive Streams Initiative", + "bbox": [ + 764, + 321, + 867, + 329 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Reactive Streams is an initiative to provide a standard for asynchronous stream processing with non-blocking back pressure on the JVM \nProblem Scope \nHandling streams of (live) data in an asynchronous and possibly non-blocking way Finding a minimal API describing the operations available on Reactive Streams \nImplementers \nRxlava \nAkka Streams \nReactor Composable Ratpack", + "bbox": [ + 748, + 330, + 888, + 397 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Which is Microsoft's biggest acquisition to date?", + "bbox": [ + 114, + 443, + 241, + 464 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Ground-truth: Skype", + "bbox": [ + 119, + 486, + 218, + 497 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Text-based RAG: Oculus", + "bbox": [ + 120, + 503, + 233, + 513 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/bc1de121b83276858f23fa75aa8f727e619999d63b1fc7668888226d99108051.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 236, + 503, + 253, + 515 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "VDocRAG: Skype", + "bbox": [ + 120, + 521, + 205, + 532 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/c9b9fa6f8fb4c7743af1f2b25068bbfacddcdca27874fb27b620b98c9518579a.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 236, + 520, + 254, + 531 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/46384f36ba5e958631e5a95913c6a969f4434967e9130b3df130d48bf095fd9e.jpg", + "image_caption": [ + "Top1" + ], + "image_footnote": [], + "bbox": [ + 284, + 450, + 429, + 537 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/3db7ae0cbb6aa7ec0887e99eca957ba1d4f7a591fab2c412bd04ff5d7c27e484.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 473, + 449, + 542, + 537 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Increasing number of prominent successful exits\n• Oracle TRUE2014\n• Acquired (Prestige) IPO 
Acquired (Oracle)\n• Acquired (Open) IPO\n• Gravity baily inktank + CDO\n• Acquired (AOL) Acquired (Apple) Acquired (Red Hat) Acquired (Open) Acquired (Oracle)\n• $8B+ in 2014 so far with more to come\nupfront", + "bbox": [ + 593, + 453, + 723, + 532 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/ae6693ffead482e8053a2b2768b7491e91c23a4e6e1efa26e846487f7a2cfedf.jpg", + "image_caption": [ + "Top2" + ], + "image_footnote": [], + "bbox": [ + 826, + 436, + 846, + 448 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/8bf73301f72ebf4656f82cc481c15fbde6f61a8740c0b33b1c37027069fd9381.jpg", + "image_caption": [ + "Top1" + ], + "image_footnote": [], + "bbox": [ + 738, + 450, + 893, + 532 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "How many layers are used in the gloves for the DPE suit?", + "bbox": [ + 114, + 573, + 256, + 594 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Ground-truth: Three", + "bbox": [ + 116, + 616, + 212, + 626 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Text-based RAG: Two", + "bbox": [ + 116, + 633, + 220, + 645 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/92b6df9e1b061c1adc5993c1f56e1cf0ed8cef4a78b71fab5fe1c20b06c29570.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 233, + 633, + 250, + 645 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "VDocRAG: Three", + "bbox": [ + 116, + 651, + 199, + 661 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/9092ea0c400936ff15a858f0a195e9d0777cd251401800d6667b66fc6f19e152.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 233, + 651, + 250, + 661 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/f449f5484e1694c7bfff1780be558bd32c7a6c684713be27c6d42f4267bddc73.jpg", + "image_caption": [ + "Top1" + ], + "image_footnote": [], + "bbox": [ + 300, + 579, + 555, + 665 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/97cc883868a778e50a29344b7815e08f67bf0882448e4cda19b58ca462572f5b.jpg", + "image_caption": [ + "Top" + ], + "image_footnote": [], + "bbox": [ + 584, + 579, + 730, + 671 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/e63e20d4fea3336cf279de0bd01cde6d0aefe471c63e1b34f107f84be90e7006.jpg", + "image_caption": [ + "Top2" + ], + "image_footnote": [], + "bbox": [ + 740, + 579, + 890, + 671 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "What is the phase before full moon?", + "bbox": [ + 114, + 708, + 223, + 728 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Ground-truth: Waxing Gibbous", + "bbox": [ + 117, + 747, + 263, + 758 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "Text-based RAG: New Mod", + "bbox": [ + 119, + 765, + 243, + 776 + ], + "page_idx": 15 + }, + { + "type": "text", + "text": "VDocRAG: Waxing Gibbous", + "bbox": [ + 119, + 782, + 245, + 792 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/60fe3beaaf4572fd3ce80a4927146705e8c482da349ae6b853c10beb2ba62fce.jpg", + "image_caption": [], + "image_footnote": [], + "bbox": [ + 285, + 715, + 359, + 795 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/07854eb43642dc45afdf6f77025fe8200fed503d7127377a87544536c2e088d9.jpg", + "image_caption": [ + "Top1" + ], + "image_footnote": [], + "bbox": [ + 369, + 715, + 563, + 795 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": 
"images/0489f0c8f2a8e8055c63cf3cf762948906913c7dd5adb78e0b699b9d12492872.jpg", + "image_caption": [ + "Top1" + ], + "image_footnote": [], + "bbox": [ + 584, + 715, + 725, + 792 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/0b86f9b43a10325329ad373f38c304d5f6d962aa2c5f295b4d7af1e06b4229c9.jpg", + "image_caption": [ + "op2", + "Figure D. Additional qualitative results of VDocRAG compared to Text-based RAG." + ], + "image_footnote": [], + "bbox": [ + 730, + 715, + 805, + 796 + ], + "page_idx": 15 + }, + { + "type": "image", + "img_path": "images/bc117a74a492d448f6907f4399296864b7122b25adc16f67e341ba2813f33b24.jpg", + "image_caption": [ + "Top2" + ], + "image_footnote": [], + "bbox": [ + 807, + 720, + 890, + 795 + ], + "page_idx": 15 + } +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_model.json b/data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_model.json new file mode 100644 index 0000000000000000000000000000000000000000..fe05323bee6c3a6da012fe8942f6e9577ffad712 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_model.json @@ -0,0 +1,3829 @@ +[ + [ + { + "type": "aside_text", + "bbox": [ + 0.023, + 0.266, + 0.061, + 0.708 + ], + "angle": 270, + "content": "arXiv:2504.09795v1 [cs.CL] 14 Apr 2025" + }, + { + "type": "header", + "bbox": [ + 0.099, + 0.124, + 0.133, + 0.153 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.134, + 0.135, + 0.9, + 0.157 + ], + "angle": 0, + "content": "VDocRAG: Retrieval-Augmented Generation over Visually-Rich Documents" + }, + { + "type": "text", + "bbox": [ + 0.101, + 0.184, + 0.912, + 0.24 + ], + "angle": 0, + "content": "Ryota Tanaka\\(^{1,2}\\) Taichi Iki\\(^{1}\\) Taku Hasegawa\\(^{1}\\) Kyosuke Nishida\\(^{1}\\) Kuniko Saito\\(^{1}\\) Jun Suzuki\\(^{2}\\) \n\\(^{1}\\)NTT Human Informatics Laboratories, NTT Corporation \nhttps://vdocrag.github.io" + }, + { + "type": "title", + "bbox": [ + 0.248, + 0.273, + 0.327, + 0.289 + ], + "angle": 0, + "content": "Abstract" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.304, + 0.486, + 0.637 + ], + "angle": 0, + "content": "We aim to develop a retrieval-augmented generation (RAG) framework that answers questions over a corpus of visually-rich documents presented in mixed modalities (e.g., charts, tables) and diverse formats (e.g., PDF, PPTX). In this paper, we introduce a new RAG framework, VDocRAG, which can directly understand varied documents and modalities in a unified image format to prevent missing information that occurs by parsing documents to obtain text. To improve the performance, we propose novel self-supervised pre-training tasks that adapt large vision-language models for retrieval by compressing visual information into dense token representations while aligning them with textual content in documents. Furthermore, we introduce OpenDocVQA, the first unified collection of open-domain document visual question answering datasets, encompassing diverse document types and formats. OpenDocVQA provides a comprehensive resource for training and evaluating retrieval and question answering models on visually-rich documents in an open-domain setting. Experiments show that VDocRAG substantially outperforms conventional text-based RAG and has strong generalization capability, highlighting the potential of an effective RAG paradigm for real-world documents." 
+ }, + { + "type": "title", + "bbox": [ + 0.091, + 0.664, + 0.222, + 0.68 + ], + "angle": 0, + "content": "1. Introduction" + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.689, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Large language models (LLMs) have demonstrated impressive performance on diverse natural language tasks [2, 16, 24, 55]. These models struggle with factual errors despite their increased model and data scale [39, 40]. To remedy this problem, retrieval-augmented generation (RAG) methods [18, 31] can retrieve knowledge from an external corpus, potentially reducing hallucination and increasing knowledge coverage. Most previous RAG frameworks assume the context is composed entirely of text, with no graphical elements. In contrast, a significant amount of real-world information is stored in visually-rich documents, such as charts, tables, web pages, and office documents. These documents often contain both textual and visual objects, with content spread structurally across various loca" + }, + { + "type": "image", + "bbox": [ + 0.526, + 0.275, + 0.895, + 0.513 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.513, + 0.53, + 0.908, + 0.587 + ], + "angle": 0, + "content": "Figure 1. Our framework of VDocRAG and examples from OpenDocVQA. VDocRAG consists of VDocRetirver and VDocGenerator, which can retrieve relevant documents and generate answers by understanding the original appearance of documents." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.614, + 0.817, + 0.629 + ], + "angle": 0, + "content": "tions depending on diverse formats and types." + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.63, + 0.907, + 0.87 + ], + "angle": 0, + "content": "Thus, document visual question answering (DocumentVQA) [42, 43, 56, 57] aims to build an agent capable of reading and comprehending document images to answer the question. Here, most existing DocumentVQA questions operate in a closed setting without requiring any retrieval. While this definition simplifies the QA model, it does not reflect many real-world use cases where the question is asked through some open-domain natural language interface, such as QA systems searching information across in-house documents or customer service chatbots on e-commerce websites. To address this limitation, recent works have introduced retrieval tasks on document images [17, 37]. However, these cannot fully develop models that effectively integrate the retrieved information into the final output. This gap hinders the application of DocumentVQA models in more realistic, open-domain scenarios." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.905, + 0.901 + ], + "angle": 0, + "content": "In this paper, we introduce a new RAG framework, VDocRAG, which can directly understand varied docu" + } + ], + [ + { + "type": "text", + "bbox": [ + 0.09, + 0.092, + 0.482, + 0.408 + ], + "angle": 0, + "content": "ments and modalities in a unified image format to avoid tedious parsing and potential information loss that occurs in conventional text-based RAG. As depicted in Figure 1, VDocRAG consists of two main components, both of which effectively leverage the visual features of documents. First, VDocRetriever retrieves document images related to the question from a corpus of document images. Second, VDocGenerator uses these retrieved images to generate the answer. 
To encode document images and interact with the encoded information, we adapt pre-trained large vision language models (LVLMs) [1, 29] as the backbone for VDocRAG. Since LVLMs are inherently generative models, it is sub-optimal for embeddings as they prevent the representations from capturing information across the entire input sequence due to the training objective (i.e., next-token prediction). To bridge this gap, we introduce new self-supervised pre-training tasks that harness the understanding and generation capabilities of LVLMs to enhance representation learning. Specifically, we compress the entire image representation into a dense token representation, by aligning the text in documents via retrieval and generation tasks." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.409, + 0.482, + 0.544 + ], + "angle": 0, + "content": "Furthermore, we introduce OpenDocVQA, the first unified collection of open-domain DocumentVQA datasets encompassing a wide range of document types and formats. OpenDocVQA provides a comprehensive resource for training and evaluating retrieval and question answering models on visually-rich documents in an open-domain setting. Experiments demonstrate that VDocRAG substantially outperforms conventional text-based RAG and has strong generalization performance." + }, + { + "type": "text", + "bbox": [ + 0.11, + 0.545, + 0.451, + 0.559 + ], + "angle": 0, + "content": "Our main contributions are summarized as follows:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.56, + 0.483, + 0.605 + ], + "angle": 0, + "content": "- We introduce a new RAG framework, VDocRAG, which can directly understand diverse real-world documents purely from visual features." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.606, + 0.483, + 0.65 + ], + "angle": 0, + "content": "- We are the first to explore pre-training tasks designed for document retrieval-oriented adaptation of LVLMs, by compressing visual document representations." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.651, + 0.483, + 0.68 + ], + "angle": 0, + "content": "- We introduce OpenDocVQA, the first unified open-domain DocumentVQA dataset with diverse documents." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.56, + 0.483, + 0.68 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.694, + 0.233, + 0.71 + ], + "angle": 0, + "content": "2. Related Work" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.72, + 0.483, + 0.901 + ], + "angle": 0, + "content": "Retrieval-augmented generation (RAG). RAG in the NLP community aims at retrieving external knowledge to reduce factual errors and enhance performance in various knowledge-intensive tasks [3, 5, 39, 40, 49]. Inspired by the success of RAG in NLP, this technique has also applied applications across different domains, including images [8, 50, 51, 64], codes [45, 70], videos [7, 61], audio [26, 62], and 3D [53, 69]. However, most existing works have focused on retrieving knowledge from only plain-text documents or non-text media. In contrast, we tackle the challenge of extracting knowledge from visually-rich documents organized in complex, multimodal formats." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.092, + 0.908, + 0.454 + ], + "angle": 0, + "content": "Visual document retrieval and visual RAG. With the success of LLMs, there is a growing trend to build large vision language models (LVLMs) that integrate image understanding capabilities by combining image encoders [32, 48, 67] with LLMs [1, 10, 29, 33, 35, 58]. 
Concurrent works in visual document retrieval [13, 17, 37] and visual RAG [9, 38, 66] leverage LVLMs to directly encode visually-rich documents through images. However, these approaches have trouble understanding diverse real-world documents due to the limitations of their datasets and training strategies. The existing visual document retrieval dataset, ViDoRe [37], contains questions that might not require retrieval and handles a limited number of document types, resulting in a gap between real-world scenarios. In contrast, our dataset covers open document types and provides questions that are verified by humans to require retrieval and to have context-independent conditions for the retrieval. From the perspective of training, despite the significant gap between generative pre-training tasks and retrieval tasks in LVLMs, previous works [9, 17, 37, 38, 66] leverage LVLMs without specific training for bridging the gap. To address this, we introduce pre-training tasks that transfer the understanding and generation capabilities of LVLMs to retrievers." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.476, + 0.908, + 0.765 + ], + "angle": 0, + "content": "Document visual question answering (DocumentVQA). DocumentVQA is a high-level document understanding task that involves answering questions on visually-rich documents. These documents include a variety of elements, such as handwritten and digital text [42, 56], complex layouts [28, 68, 71], and graphical elements [41, 43, 57]. However, previous studies have assumed closed settings that do not require retrieval, except for Dureader_vis [46]. Our work differs from Dureader_vis as follows. First, OpenDocVQA covers a wide range of document formats and domains, while Dureader_vis focuses on screenshots of websites, limiting its generalizability. Second, OpenDocVQA reflects more real-world scenarios that require both single- and multi-hop reasoning over documents, while Dureader_vis requires only single-hop reasoning. Lastly, even lexical search methods yield sufficient performance in Dureader_vis due to its reliance on textual content. In contrast, OpenDocVQA requires a visual semantic search where visual and contextual information can be exploited." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.778, + 0.81, + 0.795 + ], + "angle": 0, + "content": "3. OpenDocVQA Task and Dataset" + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.803, + 0.688, + 0.818 + ], + "angle": 0, + "content": "3.1. Task Formulation" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.825, + 0.907, + 0.903 + ], + "angle": 0, + "content": "Given a large collection of \\(N\\) document images \\(\\mathcal{I} = \\{I_1,\\dots,I_N\\}\\) and a question \\(Q\\), the goal of OpenDocVQA task is to output an answer \\(A\\) by finding the relevant \\(k\\) images \\(\\hat{\\mathcal{I}}\\in \\mathcal{I}\\), where \\(k\\ll N\\). We decompose the task into two stages. Visual document retrieval: given \\(Q\\) and \\(\\mathcal{I}\\)," + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.09, + 0.496, + 0.239 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.091, + 0.251, + 0.483, + 0.266 + ], + "angle": 0, + "content": "Figure 2. Process of creating multi-hop DocumentVQA questions." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.29, + 0.483, + 0.336 + ], + "angle": 0, + "content": "the model retrieves the relevant \\(k\\) images \\(\\hat{\\mathcal{I}}\\) from which to derive the answer. 
DocumentVQA: the model takes \\(Q\\) and the retrieved images \\(\\hat{\\mathcal{I}}\\) as input, to generate \\(A\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.337, + 0.484, + 0.457 + ], + "angle": 0, + "content": "OpenDocVQA covers multiple open-domain DocumentVQA datasets with diverse document types. To reflect real-world scenarios, we evaluate models with both single-pool and all-pool settings. In the single-pool setting, retrieval is performed from a specific pool of documents provided by each original dataset. The all-pool setting requires retrieving from the entire candidate pool, which includes documents from a wide range of domains." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.466, + 0.269, + 0.48 + ], + "angle": 0, + "content": "3.2. Dataset Collection" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.488, + 0.484, + 0.653 + ], + "angle": 0, + "content": "Filtering of DocumentVQA datasets. We collected and filtered instances of seven existing document VQA datasets [28, 41-43, 56, 57, 68]. Most of their questions are context-dependent conditions, where they cannot be answered without referencing the accompanying document (e.g., What is the title?). Therefore, we filtered out questions lacking sufficient context for retrieval. To address this, we initially applied heuristic rules to automatically select likely context-independent questions, reducing the pool by \\(20.9\\%\\). Then, we manually reviewed and verified the remaining examples to ensure their context independence." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.672, + 0.484, + 0.779 + ], + "angle": 0, + "content": "Reformulation of TableQA dataset. We used QA pairs from Open-WikiTable [27], an open-domain TableQA dataset that required retrieving tables from Wikipedia to answer the question. Since the original dataset provides tables in only textual format (HTML data), we took the screenshot images of tables from the corresponding Wikipedia pages to reformulate the task as the OpenDocVQA." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.795, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Creation of new multi-hop questions. To enhance the model's ability to interact with multiple document sources (e.g., charts and tables), we semi-automatically created a multi-hop DocumentVQA dataset, MHDocVQA, using the single-hop QA pairs collected in the previous steps. As shown in Figure 2, the creating process involved the following steps: (1) We first used spaCy [19] to identify a bridge" + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.089, + 0.906, + 0.239 + ], + "angle": 0, + "content": "
ViDoRe [17]Dureadervis [46]OpenDocVQA
Retrieval
QA
Context-Independent
Visual Semantic Search
Multi-Hop
Document ContentsT, L, F, C, DT, LT, L, F, C, D
Answer Types-ExtExt, Abs
#Document Types61Open
#QAs3,81015,00043,474
#Images (Pages)8,310158,000206,267
" + }, + { + "type": "table_caption", + "bbox": [ + 0.512, + 0.249, + 0.907, + 0.291 + ], + "angle": 0, + "content": "Table 1. Comparison of related datasets. Document contents include (T)able, (L)ist, (F)igure, (C)hart, and (D)iagram. Answer types are Extractive (Ext) and Abstractive (Abs)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.321, + 0.907, + 0.488 + ], + "angle": 0, + "content": "entity (e.g., Denmark) in the answer to a single-hop question and then searched for this entity in other single-hop questions. (2) Next, we used Mixtral-8x22B [24] to combine the two single-hop questions. (3) We filtered the generated multi-hop questions using another LLM (GPT-4o [2]), which answered the questions based on the context of the two initial single-hop questions and their answers. If the predicted answer was the same as the answer to the second single-hop question, the multi-hop question was validated. Finally, we manually reviewed the filtered questions to ensure their quality before including them in our dataset." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.515, + 0.906, + 0.622 + ], + "angle": 0, + "content": "Negative candidates mining. We produced negative image candidates for retrievers to sift through for every question, used only during inference. We first extracted OCR text from images in the COYO-700M dataset [6], a web-scaled image collection. Subsequently, we mined negative images where the OCR text exhibits high lexical overlap with the question but does not contain the correct answer." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.636, + 0.817, + 0.652 + ], + "angle": 0, + "content": "3.3. Comparison with Related Datasets" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.659, + 0.907, + 0.903 + ], + "angle": 0, + "content": "Table 1 shows the statistics of OpenDocVQA and other related datasets, including ViDoRe [17] and Dureader_vis [46]. OpenDocVQA has three unique key properties: First, it is the first large-scale collection of open-domain DocumentVQA datasets to address open document types, whereas ViDoRe considers six document types for only the retrieval task and Dureader_vis is limited to webpages. Second, the questions in OpenDocVQA are context-independent and require visual semantic search, whereas ViDoRe's questions are context-dependent, and even lexical search methods yield sufficient performance in Dureader_vis. This indicates our dataset better reflects real-world scenarios. Lastly, unlike ViDoRe and Dureader_vis, OpenDocVQA requires multi-hop reasoning with extractive (e.g., span, list) and abstractive (e.g., arithmetic, counting, no answer) answer types, providing a more challenging setting." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.113, + 0.09, + 0.495, + 0.263 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.499, + 0.091, + 0.88, + 0.265 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.278, + 0.908, + 0.308 + ], + "angle": 0, + "content": "Figure 3. Overview of our VDocRAG model. VDocRetriever retrieves document images related to the question from a corpus of document images, and VDocGenerator uses these retrieved images to generate the answer." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.332, + 0.253, + 0.349 + ], + "angle": 0, + "content": "4. Proposed Model" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.357, + 0.303, + 0.373 + ], + "angle": 0, + "content": "4.1. 
Architecture Overview" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.381, + 0.484, + 0.456 + ], + "angle": 0, + "content": "As shown in Figure 3, VDocRAG consists of two components: VDocRetriever and VDocGenerator. Our approach adopts the pre-trained LVLMs to unify the varied formats and modalities in a single form as an image for direct document understanding." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.481, + 0.484, + 0.603 + ], + "angle": 0, + "content": "Dynamic high-resolution image encoding. To encode high-resolution images with various aspect ratios, a dynamic cropping [14, 65] is utilized to split the image into smaller patches while maintaining the integrity of the original aspect ratio. Each patch is a small image with \\(336 \\times 336\\) size, and we treat them as individual inputs for the image encoder. After encoding images, we convert them via a projector (two-layer MLP) into visual document features \\(\\mathbf{z}_{\\mathrm{d}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.626, + 0.484, + 0.804 + ], + "angle": 0, + "content": "VDocRetriever. VDocRetriever is an LVLM-based dual-encoder architecture that encodes queries and document images independently. We append an \\(<\\mathrm{EOS}>\\) token to the end of the question and visual document features \\(\\mathbf{z}_{\\mathrm{d}}\\), and then feed them into the LLM to obtain the question and visual document embeddings \\((\\mathbf{h}_{\\mathrm{q}}, \\mathbf{h}_{\\mathrm{d}})\\) by taking the last layer \\(<\\mathrm{EOS}>\\) vector. Then, it retrieves \\(k\\) documents \\(\\hat{\\mathcal{I}}\\) with the \\(k\\) highest similarity scores to the question. Formally, the similarity scores between the question and visual document embeddings are computed via maximum inner product search [15], as follows: \\(\\mathrm{SIM}(\\mathbf{h}_{\\mathrm{q}}, \\mathbf{h}_{\\mathrm{d}}) = \\frac{\\mathbf{h}_{\\mathrm{q}}^{\\top} \\mathbf{h}_{\\mathrm{d}}}{\\|\\mathbf{h}_{\\mathrm{q}}\\| \\|\\mathbf{h}_{\\mathrm{d}}\\|}\\)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.825, + 0.484, + 0.901 + ], + "angle": 0, + "content": "VDocGenerator. VDocGenerator adapts LVLM to generate answers \\( A \\) given the question \\( Q \\) and the retrieved \\( k \\) documents \\( \\hat{\\mathcal{I}} \\) obtained from VDocRetriever. After encoding the retrieval result, we concatenate the question and the encoded result, then feed this combined input into the LLM." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.332, + 0.816, + 0.349 + ], + "angle": 0, + "content": "4.2. Self-Supervised Pre-training Tasks" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.354, + 0.907, + 0.506 + ], + "angle": 0, + "content": "Figure 4a and 4b show our pre-taining tasks in VDocRetriever. The goal of pre-training is to transfer the powerful understanding and generation abilities of LVLMs to facilitate their usage in visual document retrieval. To this end, we propose two new self-supervised pre-training tasks to compress the entire image representation into the token at the end of the input image. Our pre-training process passes the document image, and its extracted OCR text is used as a pseudo target. Full pre-training objectives is defined as \\(\\mathcal{L} = \\mathcal{L}_{\\mathrm{RCR}} + \\mathcal{L}_{\\mathrm{RCG}}\\)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.525, + 0.906, + 0.646 + ], + "angle": 0, + "content": "Representation Compression via Retrieval (RCR). 
We compress image representations with a contrastive learning task that retrieves images relevant to their corresponding OCR text, by leveraging LVLM's image understanding capabilities. As shown in Figure 4a, we first construct positive OCR text-image pairs \\((\\mathbf{h}_0,\\mathbf{h}_{\\mathrm{d}^+})\\) from raw unlabeled document images. Then, we adopt in-batch negatives to calculate the contrastive loss by InfoNCE [44] as follows:" + }, + { + "type": "equation", + "bbox": [ + 0.567, + 0.656, + 0.905, + 0.691 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {R C R}} = - \\log \\frac {\\exp \\left(\\operatorname {S I M} \\left(\\mathbf {h} _ {\\mathrm {o}}, \\mathbf {h} _ {\\mathrm {d} ^ {+}}\\right) / \\tau\\right)}{\\sum_ {i \\in \\mathcal {B}} \\exp \\left(\\operatorname {S I M} \\left(\\mathbf {h} _ {\\mathrm {o}}, \\mathbf {h} _ {\\mathrm {d} _ {i}}\\right) / \\tau\\right)}, \\tag {1}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.701, + 0.905, + 0.731 + ], + "angle": 0, + "content": "where \\(\\tau\\) is a temperature hyperparameter to scale the logits, and \\(\\mathcal{B}\\) represents the batch size." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.75, + 0.907, + 0.902 + ], + "angle": 0, + "content": "Representation Compression via Generation (RCG). We propose a representation training strategy that leverages the generative capabilities of LVLMs through a customized attention mask matrix. As depicted in Figure 4b, representations for the image tokens, including the token, are obtained via a standard auto-regressive process. In contrast, for the subsequent L OCR token representations, we mask the image token representations and allow only the attention of token and the preceding OCR tokens. This approach facilitates pooling the image representations" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.115, + 0.101, + 0.136, + 0.117 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.137, + 0.105, + 0.178, + 0.114 + ], + "angle": 0, + "content": "Trainable" + }, + { + "type": "image", + "bbox": [ + 0.187, + 0.105, + 0.235, + 0.114 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.298, + 0.096, + 0.448, + 0.109 + ], + "angle": 0, + "content": "Self-Supervised Pre-training" + }, + { + "type": "image", + "bbox": [ + 0.108, + 0.131, + 0.357, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.1, + 0.28, + 0.358, + 0.293 + ], + "angle": 0, + "content": "(a) Representation Compression via Retrieval (RCR)" + }, + { + "type": "image", + "bbox": [ + 0.374, + 0.093, + 0.649, + 0.276 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.375, + 0.28, + 0.644, + 0.293 + ], + "angle": 0, + "content": "(b) Representation Compression via Generation (RCG)" + }, + { + "type": "image_caption", + "bbox": [ + 0.713, + 0.097, + 0.838, + 0.11 + ], + "angle": 0, + "content": "Supervised Fine-tuning" + }, + { + "type": "image", + "bbox": [ + 0.656, + 0.131, + 0.9, + 0.277 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.704, + 0.28, + 0.855, + 0.292 + ], + "angle": 0, + "content": "(c) Visual Document Retrieval" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.309, + 0.908, + 0.339 + ], + "angle": 0, + "content": "Figure 4. Our pre-training tasks using unlabeled documents and fine-tuning in VDocRetriever. 
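Eq. (1) above is a standard in-batch InfoNCE objective; a minimal PyTorch sketch is given here for reference (illustrative only, not the training code). h_text and h_image would be the <EOS> embeddings of the OCR text and of the document images in one batch, with row i of each tensor forming a positive pair; the default temperature of 0.01 is the value reported in the implementation details.

```python
import torch
import torch.nn.functional as F

def rcr_loss(h_text: torch.Tensor, h_image: torch.Tensor, tau: float = 0.01) -> torch.Tensor:
    # Cosine similarity between every OCR-text embedding and every image embedding
    # in the batch; diagonal entries are the positive pairs, all other entries act
    # as in-batch negatives (Eq. 1).
    h_text = F.normalize(h_text, dim=-1)
    h_image = F.normalize(h_image, dim=-1)
    logits = h_text @ h_image.t() / tau
    targets = torch.arange(h_text.size(0), device=h_text.device)
    return F.cross_entropy(logits, targets)
```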
The RCR task retrieves relevant images given corresponding OCR tokens, and the RCG task outputs OCR tokens by paying attention to only the token." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.36, + 0.482, + 0.501 + ], + "angle": 0, + "content": "
DatasetDocuments%Filtered#Images#Train&Dev#Test
DocVQA [42]Industry84.812,7676,382-
InfoVQA [43]Infographic61.25,4859,5921,048
VisualMRC [56]Webpage71.910,2296,126-
ChartQA [41]Chart94.020,882-150
OpenWikiTable [27]Table0.01,2574,261-
DUDE [28]Open92.327,9552,135496
MPMQA [68]Manual81.710,0183,054-
SlideVQA [57]\\$Slide66.752,380-760
MHDocVQA\\$Open9.528,5509,470-
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.511, + 0.484, + 0.555 + ], + "angle": 0, + "content": "Table 2. Datasets in OpenDocVQA. \\(\\S\\) denotes datasets requiring multi-hop reasoning. Note that MHDocVQA was created using only the training datasets." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.577, + 0.427, + 0.593 + ], + "angle": 0, + "content": "into \\(<\\mathsf{EOS}>\\) token. The loss function is defined as:" + }, + { + "type": "equation", + "bbox": [ + 0.155, + 0.598, + 0.483, + 0.64 + ], + "angle": 0, + "content": "\\[\n\\mathcal {L} _ {\\mathrm {R C G}} = - \\frac {1}{L} \\sum_ {i = 1} ^ {L} \\log p \\left(y _ {i} \\mid y _ {< i}, < \\mathrm {E O S} >\\right), \\tag {2}\n\\]" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.646, + 0.383, + 0.661 + ], + "angle": 0, + "content": "where \\(y_{i}\\) denotes the \\(i\\)-th token of the OCR." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.669, + 0.308, + 0.685 + ], + "angle": 0, + "content": "4.3. Supervised Fine-tuning" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.691, + 0.484, + 0.783 + ], + "angle": 0, + "content": "We first fine-tune the VDocRetriever with the contrastive learning objective using query-document pairs with in-batch negatives (see Figure 4c). Then, we apply the trained VDocRetriever to search over the corpus \\(\\mathcal{I}\\) to feed the top-k documents into the VDocGenerator. Finally, we train the VDocGenerator using the next-token prediction objective." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.794, + 0.224, + 0.811 + ], + "angle": 0, + "content": "5. Experiments" + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.818, + 0.281, + 0.835 + ], + "angle": 0, + "content": "5.1. Experimental Setup" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Pre-training dataset. For pre-training, we gathered 500k samples containing document image and OCR text pairs filtered from the DocStruct4M [20]. We excluded any images that appeared in the test set to avoid data contamination." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.364, + 0.907, + 0.501 + ], + "angle": 0, + "content": "Fine-tuning and evaluation datasets. We evaluated our models in both zero-shot and supervised settings. The zero-shot evaluation assessed the models' generalization capabilities on unseen datasets, while the supervised evaluation measured performance when training samples were available. As shown in Table 2, we trained our models on seven datasets and evaluated them on four datasets, including ChartQA and SlideVQA in the zero-shot setting, and InfoVQA and DUDE in the supervised setting." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.519, + 0.909, + 0.701 + ], + "angle": 0, + "content": "Implementation details. We initialized VDocRAG with Phi3V [1], a state-of-the-art LVLM trained on high-resolution images and multi-image data. The parameters of VDocRetriever and VDocGenerator were not shared. We employed LoRA [21] with LLM while keeping other parameters frozen during training. We trained VDocRAG for one epoch on eight A100-80G GPUs with AdamW [36] optimizer and FlashAttention [11], using batch sizes of 16 for pre-training and 64 for fine-tuning. We set the temperature \\(\\tau\\) to 0.01. We applied Tesseract [54] to extract OCR text in images. By default, we used the top three documents obtained from VDocRetirver." 
+ }, + { + "type": "text", + "bbox": [ + 0.512, + 0.72, + 0.909, + 0.902 + ], + "angle": 0, + "content": "Retrieval baselines. We compared VDocRetriever with two categories of retrievers. The first category includes off-the-shelf text retrieval models on extracted text and image retrieval models. These consist of BM25 [52], a lexical matching model; Contriver [22], E5 [59], and GTE [34], which are popular strong text embedding models based on BERT [12]; E5-Mistral [60] and NV-Embedv2 [30], which are state-of-the-art LLM-based embedding models; CLIP [47], a dual-encoder vision-language model; DSE [37] and VisRAG-Ret [66], which are state-of-the-art visual document retrieval models. The second category includes fine-tuned models trained on OpenDocVQA. To" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.098, + 0.089, + 0.902, + 0.3 + ], + "angle": 0, + "content": "
ModelInitDocsScale#PT#FTChartQASlideVQAInfoVQADUDE
SingleAllSingleAllSingleAllSingleAll
Off-the-shelf
BM25 [52]-Text00054.815.640.738.750.231.357.247.5
Contriever [22]BERT [12]Text110M1B500K66.959.350.846.542.521.040.629.7
E5 [59]BERT [12]Text110M270M1M74.966.353.649.649.226.945.038.9
GTE [34]BERT [12]Text110M788M3M72.864.755.449.151.332.542.436.0
E5-Mistral [60]Mistral [23]Text7.1B01.85M72.370.063.857.660.333.952.245.2
NV-Embed-v2 [30]Mistral [23]Text7.9B02.46M75.370.761.758.156.534.243.038.6
CLIP [47]ScratchImage428M400M054.638.638.129.745.320.623.217.6
DSE [37]Phi3V [1]Image4.2B05.61M72.768.573.067.267.449.655.547.7
VisRAG-Ret [66]MiniCPM-V [63]Image3.4B0240K87.2*75.5*74.3*68.4*71.9*51.7*56.444.5
Trained on OpenDocVQA
Phi3 [1]Phi3V [1]Text4B041K72.565.353.348.453.2*33.0*40.5*32.0*
VDocRetriever†Phi3V [1]Image4.2B041K84.2+11.774.8+9.571.0+17.765.1+16.766.8*+13.652.8*+19.848.4*+7.941.0*+9.0
VDocRetrieverPhi3V [1]Image4.2B500K41K86.0+1.876.4+1.677.3+6.373.3+8.272.9*+6.155.5*+2.757.7*+9.350.9*+9.9
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.31, + 0.907, + 0.355 + ], + "angle": 0, + "content": "Table 3. Retrieval results under the single- (Single) and all-pool (All) settings. * indicates performance on test data for which corresponding training samples are available. All other results represent zero-shot performance. Init, FT, and PT denote the initialization model, finetuning, and pre-training, respectively. Performance gains in green and blue are compared to the base LLM and VDocRetirver†, respectively." + }, + { + "type": "table", + "bbox": [ + 0.147, + 0.365, + 0.855, + 0.511 + ], + "angle": 0, + "content": "
GeneratorRetrieverDocsChartQASlideVQAInfoVQADUDE
SingleAllSingleAllSingleAllSingleAll
Closed-book
Phi3--20.020.020.320.334.9*34.9*23.1*23.1*
Text-based RAG
Phi3Phi3Text28.028.028.628.040.5*39.1*40.1*35.7*
Phi3GoldText36.636.627.827.845.6*45.6*55.9*55.9*
VDocRAG (Ours)
VDocGeneratorVDocRetrieverImage52.0+24.048.0+20.044.2+15.642.0+14.056.2*+15.749.2*+10.148.5*+8.444.0*+8.3
VDocGeneratorGoldImage74.074.056.456.464.6*64.6*66.4*66.4*
" + }, + { + "type": "table_caption", + "bbox": [ + 0.089, + 0.522, + 0.908, + 0.566 + ], + "angle": 0, + "content": "Table 4. DocumentVQA results. All models are fine-tuned on OpenDocVQA. The results marked with * denote performance on unseen test samples, and the other results represent zero-shot performance. The performance gain in green is compared to the text-based RAG that has the same base LLM. Gold knows the ground-truth documents. Models answer the question based on the top three retrieval results." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.59, + 0.482, + 0.667 + ], + "angle": 0, + "content": "verify the effectiveness of encoding documents through images, we fine-tuned the LLM in VDocRetriever (Phi3 [1]) using extracted text to represent documents. Additionally, we included a variant of VDocRetriever without pretraining (VDocRetriever†)." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.685, + 0.483, + 0.807 + ], + "angle": 0, + "content": "QA baselines. We compared VDocRAG against closed-book and text-based RAG models. These baselines used the same model initialization as VDocRAG but fine-tuned only the LLM (Phi3). The closed-book model received only the question as input, while the text-based RAG used the top three documents retrieved by the Phi3 retriever. Moreover, we assessed possible upper-bound performance by testing generation with ground-truth (Gold) documents." + }, + { + "type": "text", + "bbox": [ + 0.089, + 0.825, + 0.483, + 0.903 + ], + "angle": 0, + "content": "Evaluation metrics. We evaluated retrieval performance using nDCG@5, a widely used metric in information retrieval [17, 25]. For the DocumentVQA task, we followed the evaluation protocol of each dataset, we used ANLS [4] for InfoVQA and DUDE, Relaxed Accuracy [41] for" + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.59, + 0.845, + 0.607 + ], + "angle": 0, + "content": "ChartQA, F1 for SlideVQA as evaluation metrics." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.617, + 0.683, + 0.632 + ], + "angle": 0, + "content": "5.2. Retrieval Results" + }, + { + "type": "text", + "bbox": [ + 0.511, + 0.639, + 0.907, + 0.838 + ], + "angle": 0, + "content": "Table 3 shows that VDocRetriever† achieved significantly higher retrieval performance than the text-based Phi3 retriever on all datasets under the same conditions. This indicates that our model can effectively encode documents in image format for retrieval tasks. Furthermore, VDocRetriever exhibits superior zero-shot generalization on unseen datasets, ChartQA and SlideVQA, outperforming both off-the-shelf text retrievers and state-of-the-art visual document retrieval models. Notably, DSE was initialized with the same LVLM as ours and fine-tuned on 13.7 times more data. This highlights that our pre-training strategy and the OpenDocVQA dataset offer unique advantages that are not adequately addressed by existing approaches." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.848, + 0.868, + 0.865 + ], + "angle": 0, + "content": "5.3. Retrieval-Augmented Generation Results" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.907, + 0.901 + ], + "angle": 0, + "content": "Table 4 shows that VDocRAG significantly outperformed both the closed-book LLM and the text-based RAG on" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.095, + 0.089, + 0.482, + 0.185 + ], + "angle": 0, + "content": "
ModelSlideVQAInfoVQA
VDocRetriever77.372.9
w/o RCR75.9-1.471.1-1.8
w/o RCG71.7-5.668.8-4.1
w/o RCG & RCR71.0-6.366.8-6.1
w/o LLM & Projector (→CLIP encoders)43.7-33.637.9-35.0
" + }, + { + "type": "table_caption", + "bbox": [ + 0.091, + 0.195, + 0.483, + 0.224 + ], + "angle": 0, + "content": "Table 5. Ablation study of our pre-training tasks and model architecture in the retrieval task under the single-pool setting." + }, + { + "type": "table", + "bbox": [ + 0.095, + 0.238, + 0.487, + 0.318 + ], + "angle": 0, + "content": "
ModelRetrievalQA
SlideVQAInfoVQASlideVQAInfoVQA
VDocRAG77.372.944.256.2
w/o MHDocVQA75.0-2.371.4-1.543.4-0.853.8-2.4
w/o except MHDocVQA68.8-8.561.7-11.241.1-3.144.0-12.2
" + }, + { + "type": "table_caption", + "bbox": [ + 0.09, + 0.328, + 0.484, + 0.357 + ], + "angle": 0, + "content": "Table 6. Ablation study of our dataset in retrieval and QA tasks under the single-pool setting." + }, + { + "type": "image", + "bbox": [ + 0.093, + 0.371, + 0.289, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.124, + 0.507, + 0.27, + 0.521 + ], + "angle": 0, + "content": "(a) Retrieval performance" + }, + { + "type": "image", + "bbox": [ + 0.293, + 0.372, + 0.484, + 0.5 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.331, + 0.507, + 0.446, + 0.52 + ], + "angle": 0, + "content": "(b) QA performance" + }, + { + "type": "image_caption", + "bbox": [ + 0.09, + 0.537, + 0.484, + 0.567 + ], + "angle": 0, + "content": "Figure 5. Performance under different document lengths on InfoVQA (single-pool setting)." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.594, + 0.484, + 0.761 + ], + "angle": 0, + "content": "the DocumentVQA task, even when all models were the same initialization. Additionally, when the retrieval results were fixed to ground-truth (Gold) documents, VDocRAG demonstrated superior performance to text-based RAG. This underscores the importance of visual cues in extracting answers from documents and suggests that VDocGenerator has a higher upper-bound performance. Both text-based RAG and VDocRAG exhibited substantial improvements when provided with ground-truth documents, highlighting potential areas for enhancing retrieval accuracy and improving the generator's robustness to retrieval noise." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.772, + 0.194, + 0.789 + ], + "angle": 0, + "content": "5.4. Analysis" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.795, + 0.484, + 0.903 + ], + "angle": 0, + "content": "Can our pre-training tasks be beneficial? Table 5 shows that VDocRetriever outperformed the model without pretraining. Removing each pre-training task or both RCG and RCR tasks decreased performance, indicating that both tasks contribute complementarily. These validate that our pre-training effectively learns to compress image features while aligning them with textual contents in images." + }, + { + "type": "table", + "bbox": [ + 0.541, + 0.089, + 0.883, + 0.158 + ], + "angle": 0, + "content": "
ModelRetrievalQA
OCREncodingGenerationTotal
Text-based RAGPhi3590.070.7422.71083.4
VDocRAG-204.4789.7994.1
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.168, + 0.907, + 0.21 + ], + "angle": 0, + "content": "Table 7. Efficiency analysis on InfoVQA. The average time (ms) to encode a single document or generate a single answer is measured on a single A100 GPU." + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.226, + 0.905, + 0.307 + ], + "angle": 0, + "content": "
ModelRetrievalQA
SlideVQAInfoVQASlideVQAInfoVQA
Text-based RAGLlama360.161.837.849.5
VDocRAGIdefics373.472.548.959.9
w/o Pre-train70.369.847.259.6
" + }, + { + "type": "table_caption", + "bbox": [ + 0.513, + 0.317, + 0.907, + 0.346 + ], + "angle": 0, + "content": "Table 8. Analysis with different LVLM (Idefics3) in retrieval and QA tasks under the single-pool setting." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.374, + 0.907, + 0.466 + ], + "angle": 0, + "content": "Does LLM help understanding document images? Table 5 shows that retrieval performance dropped substantially when the LLM block was removed, leaving only the CLIP text/vision encoder, even with the same visual transformer backbone. This suggests that LLM can capture finer-grained visual details and enhance semantic understanding." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.489, + 0.909, + 0.626 + ], + "angle": 0, + "content": "Does our dataset improve the performance? Table 6 shows that removing MHDocVQA caused a performance decrease, indicating that MHDocVQA requires distinct reasoning skills compared to other collected datasets in OpenDocVQA. Additionally, excluding all OpenDocVQA datasets except MHDocVQA led to a significant performance drop. This confirms that our collected datasets effectively supplement the missing capabilities of LVLM in document retrieval and understanding." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.65, + 0.909, + 0.772 + ], + "angle": 0, + "content": "How well does VDocRAG perform under different document lengths? Figure 5 shows that VDocRAG consistently outperforms text-based RAG, indicating that VDocRAG can better understand documents through visual information. In general, we observed that the VDocRAG's relative performance over text-based RAG is larger for images with 0-10 words (+66.0 in retrieval, +21.1 in QA) than for those with 500+ words (+28.4 in retrieval, +16.7 in QA)." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.795, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Is VDocRAG more efficient than text-based RAG? Table 7 shows that VDocRAG is more efficient than text-based RAG. Especially, VDocRAG requires \\(69\\%\\) less inference time to retrieve documents than text-based RAG. Although VDocRetriever takes more time for document encoding and generation, it eliminates the time-consuming OCR processing necessary for text-based RAG." + } + ], + [ + { + "type": "image", + "bbox": [ + 0.095, + 0.09, + 0.904, + 0.319 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.282, + 0.328, + 0.715, + 0.343 + ], + "angle": 0, + "content": "Figure 6. Qualitative results of VDocRAG compared to text-based RAG." + }, + { + "type": "image", + "bbox": [ + 0.102, + 0.367, + 0.268, + 0.488 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.1, + 0.492, + 0.287, + 0.514 + ], + "angle": 0, + "content": "(a) VDocRAG answers correctly, but Text-based RAG answers incorrectly" + }, + { + "type": "image", + "bbox": [ + 0.312, + 0.367, + 0.473, + 0.489 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.297, + 0.492, + 0.478, + 0.514 + ], + "angle": 0, + "content": "(b) VDocRAG answers incorrectly, but Text-based RAG answers correctly" + }, + { + "type": "image_caption", + "bbox": [ + 0.114, + 0.53, + 0.46, + 0.544 + ], + "angle": 0, + "content": "Figure 7. Root causes of correct and incorrect predictions." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.572, + 0.483, + 0.678 + ], + "angle": 0, + "content": "Can our method apply different LVLMs? 
To investigate the impact of different LVLMs on VDocRAG, we replaced Phi3V with Idefics3 [29], a state-of-the-art LVLM that uses Llama3-8B [16] as its backbone LLM. As observed in Table 8, the performance trend was consistent with that of Phi3V, highlighting the versatility and broad applicability of our method." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.699, + 0.484, + 0.851 + ], + "angle": 0, + "content": "Qualitative results. Figure 6 illustrates the performance of our model through qualitative examples. In the top example, VDocRAG demonstrates strong performance on a question requiring multi-hop reasoning and graph understanding across multi-page slides. In the bottom example, VDocRAG also performs better on a question that requires parsing on the table with cells spanning multiple rows and columns. In contrast, text-based RAG depends solely on OCR text information, leading to a superficial understanding of the text and incorrect predictions." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.484, + 0.901 + ], + "angle": 0, + "content": "Human evaluation. To better understand the prediction differences between VDocRAG and text-based RAG, we" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.37, + 0.907, + 0.536 + ], + "angle": 0, + "content": "manually analyzed the generated outputs by identifying the root causes of 50 correct and 50 incorrect predictions, randomly sampled from test samples. Figure 7a shows that VDocRAG significantly enhances the understanding of visual data (e.g., charts). Conversely, Figure 7b reveals that VDocRAG encounters challenges with text-heavy documents (e.g., books), primarily due to the OCR capabilities. We observed that text-based RAG correctly answers questions when visual data includes long titles or subtitles, which have a high textual overlap with the question. These observations are in line with the results shown in Figure 5." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.551, + 0.634, + 0.567 + ], + "angle": 0, + "content": "6. Conclusion" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.577, + 0.907, + 0.744 + ], + "angle": 0, + "content": "We introduced a new RAG framework, VDocRAG, which can directly understand various real-world documents. We enhanced VDocRAG with two key contributions: (1) pretraining tasks capable of learning image representation efficiently by leveraging the powerful capabilities of LVLMs, and (2) OpenDocVQA, the first unified open-domain DocumentVQA dataset that encompasses a wide range of visually-rich documents. Our holistic evaluations on four datasets show that VDocRAG significantly outperformed conventional text-based RAG, shedding light on the development of an effective RAG over real-world documents." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.765, + 0.909, + 0.903 + ], + "angle": 0, + "content": "Limitations. While we focused on pre-training to align images and OCR data for document retrieval, leveraging caption data instead of OCR data offers the potential for retrieving images that do not contain text. Moreover, this study did not address reducing the computational cost of creating search indexes for extensive image collections. We plan to reduce the cost of VDocRAG using more efficient techniques. Lastly, joint training of QA and retrieval components simultaneously further optimizes their interactions." 
+ } + ], + [ + { + "type": "title", + "bbox": [ + 0.093, + 0.09, + 0.188, + 0.106 + ], + "angle": 0, + "content": "References" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.115, + 0.482, + 0.184 + ], + "angle": 0, + "content": "[1] Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv:2404.14219, 2024. 2, 5, 6, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.101, + 0.185, + 0.482, + 0.24 + ], + "angle": 0, + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. GPT-4 technical report. arXiv:2303.08774, 2023. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.242, + 0.482, + 0.283 + ], + "angle": 0, + "content": "[3] Akari Asai, Sewon Min, Zexuan Zhong, and Danqi Chen. Retrieval-based language models and applications. In ACL, pages 41-46, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.285, + 0.482, + 0.338 + ], + "angle": 0, + "content": "[4] Ali Furkan Biten, Rubén Tito, Andrés Mafla, Lluis Gómez i Bigorda, Marçal Rusinol, C. V. Jawahar, Ernest Valveny, and Dimosthenis Karatzas. Scene text visual question answering. In ICCV, pages 4290-4300, 2019. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.341, + 0.482, + 0.422 + ], + "angle": 0, + "content": "[5] Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George Bm Van Den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. Improving language models by retrieving from trillions of tokens. In ICML, pages 2206-2240, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.424, + 0.482, + 0.478 + ], + "angle": 0, + "content": "[6] Minwoo Byeon, Beomhee Park, Haecheon Kim, Sungjun Lee, Woonhyuk Baek, and Saehoon Kim. Coyo-700m: Image-text pair dataset. https://github.com/kakaobrain/coyo-dataset, 2022.3" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.481, + 0.482, + 0.535 + ], + "angle": 0, + "content": "[7] Jingwen Chen, Yingwei Pan, Yehao Li, Ting Yao, Hongyang Chao, and Tao Mei. Retrieval augmented convolutional encoder-decoder networks for video captioning. TOMCCAP, pages 1-24, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.537, + 0.482, + 0.577 + ], + "angle": 0, + "content": "[8] Wenhu Chen, Hexiang Hu, Chitwan Sahara, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. arXiv:2209.14491, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.102, + 0.579, + 0.482, + 0.633 + ], + "angle": 0, + "content": "[9] Jaemin Cho, Debanjan Mahata, Ozan Irsoy, Yujie He, and Mohit Bansal. M3DocRAG: Multi-modal retrieval is what you need for multi-page multi-document understanding. arXiv:2411.04952, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.635, + 0.482, + 0.703 + ], + "angle": 0, + "content": "[10] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. InstructBLIP: Towards general-purpose vision-language models with instruction tuning. arXiv:2305.06500, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.705, + 0.482, + 0.758 + ], + "angle": 0, + "content": "[11] Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. 
FlashAttention: Fast and memory-efficient exact attention with io-awareness. In NeurIPS, pages 16344-16359, 2022. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.761, + 0.482, + 0.815 + ], + "angle": 0, + "content": "[12] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In *NAACL-HLT*, pages 4171–4186, 2019. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.818, + 0.482, + 0.871 + ], + "angle": 0, + "content": "[13] Kuicai Dong, Yujing Chang, Xin Deik Goh, Dexun Li, Ruiming Tang, and Yong Liu. MMDocIR: Benchmarking multi-modal retrieval for long documents. arXiv:2501.08828, 2025. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.094, + 0.873, + 0.482, + 0.902 + ], + "angle": 0, + "content": "[14] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Songyang Zhang, Haodong Duan," + }, + { + "type": "list", + "bbox": [ + 0.094, + 0.115, + 0.482, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.545, + 0.093, + 0.905, + 0.134 + ], + "angle": 0, + "content": "Wenwei Zhang, Yining Li, et al. Internlm-xcomposer2-4khd: A pioneering large vision-language model handling resolutions from 336 pixels to 4k hd. arXiv:2404.06512, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.136, + 0.905, + 0.19 + ], + "angle": 0, + "content": "[15] Matthijs Douze, Alexandr Guzhva, Chengqi Deng, Jeff Johnson, Gergely Szilvasy, Pierre-Emmanuel Mazaré, Maria Lomeli, Lucas Hosseini, and Hervé Jégou. The faiss library. arXiv:2401.08281, 2024. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.193, + 0.905, + 0.247 + ], + "angle": 0, + "content": "[16] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv:2407.21783, 2024. 1, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.25, + 0.905, + 0.304 + ], + "angle": 0, + "content": "[17] Manuel Faysse, Hugues Sibille, Tony Wu, Gautier Vi-aud, Céline Hudelot, and Pierre Colombo. ColPali: Efficient document retrieval with vision language models. arXiv:2407.01449, 2024. 1, 2, 3, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.307, + 0.905, + 0.347 + ], + "angle": 0, + "content": "[18] Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. Retrieval augmented language model pretraining. In ICML, pages 3929-3938, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.35, + 0.905, + 0.391 + ], + "angle": 0, + "content": "[19] Matthew Honnibal and Ines Montani. spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing. To appear, 2017. 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.394, + 0.905, + 0.448 + ], + "angle": 0, + "content": "[20] Anwen Hu, Haiyang Xu, Jiabo Ye, Ming Yan, Liang Zhang, Bo Zhang, Chen Li, Ji Zhang, Qin Jin, Fei Huang, et al. nplug-docowl 1.5: Unified structure learning forOCR-free document understanding. arXiv:2403.12895, 2024. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.45, + 0.905, + 0.504 + ], + "angle": 0, + "content": "[21] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. arXiv:2106.09685, 2021. 
5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.507, + 0.905, + 0.561 + ], + "angle": 0, + "content": "[22] Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. Unsupervised dense information retrieval with contrastive learning. arXiv:2112.09118, 2021. 5, 6, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.564, + 0.905, + 0.658 + ], + "angle": 0, + "content": "[23] Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b. arXiv:2310.06825, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.518, + 0.662, + 0.905, + 0.729 + ], + "angle": 0, + "content": "[24] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv:2401.04088, 2024. 1, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.733, + 0.905, + 0.786 + ], + "angle": 0, + "content": "[25] Ehsan Kamalloo, Nandan Thakur, Carlos Lassance, Xueguang Ma, Jheng-Hong Yang, and Jimmy Lin. Resources for brewing heir: Reproducible reference models and an official leaderboard, 2023. 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.789, + 0.905, + 0.844 + ], + "angle": 0, + "content": "[26] Yuma Koizumi, Yasunori Ohishi, Daisuke Niizumi, Daiki Takeuchi, and Masahiro Yasuda. Audio captioning using pre-trained large-scale language model guided by audiobased similar caption retrieval. arXiv:2012.07331, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.846, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[27] Sunjun Kweon, Yeonsu Kwon, Seonhee Cho, Yohan Jo, and Edward Choi. Open-WikiTable: Dataset for open domain question answering with complex reasoning over table. In Findings of ACL, pages 8285-8297, 2023. 3, 5, 1" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.161 + ], + "angle": 0, + "content": "[28] Jordy Landeghem, Rubén Tito, Łukasz Borchmann, Michal Pietruszka, Paweł Józiak, Rafał Powalski, Dawid Jurkiewicz, Mickaël Coustaty, Bertrand Ackaert, Ernest Valveny, et al. Document understanding dataset and evaluation (dude). In ICCV, pages 19528-19540, 2023. 2, 3, 5, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.162, + 0.482, + 0.218 + ], + "angle": 0, + "content": "[29] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. arXiv:2408.12637, 2024. 2, 8" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.218, + 0.482, + 0.273 + ], + "angle": 0, + "content": "[30] Chankyu Lee, Rajarshi Roy, Mengyao Xu, Jonathan Raiman, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. NvEmbed: Improved techniques for training llms as generalist embedding models. arXiv:2405.17428, 2024. 
5, 6, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.274, + 0.482, + 0.343 + ], + "angle": 0, + "content": "[31] Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. In NIPS, pages 9459-9474, 2020. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.343, + 0.482, + 0.399 + ], + "angle": 0, + "content": "[32] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In ICML, pages 12888-12900, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.4, + 0.482, + 0.455 + ], + "angle": 0, + "content": "[33] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, pages 19730–19742, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.455, + 0.482, + 0.51 + ], + "angle": 0, + "content": "[34] Zehan Li, Xin Zhang, Yanzhao Zhang, Dingkun Long, Pengjun Xie, and Meishan Zhang. Towards general text embeddings with multi-stage contrastive learning. arXiv:2308.03281, 2023. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.511, + 0.482, + 0.538 + ], + "angle": 0, + "content": "[35] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv:2304.08485, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.539, + 0.482, + 0.566 + ], + "angle": 0, + "content": "[36] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv:1711.05101, 2017. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.567, + 0.482, + 0.621 + ], + "angle": 0, + "content": "[37] Xueguang Ma, Sheng-Chieh Lin, Minghan Li, Wenhu Chen, and Jimmy Lin. Unifying multimodal retrieval via document screenshot embedding. arXiv:2406.11251, 2024. 1, 2, 5, 6, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.622, + 0.482, + 0.677 + ], + "angle": 0, + "content": "[38] Xueguang Ma, Shengyao Zhuang, Bevan Koopman, Guido Zuccon, Wenhu Chen, and Jimmy Lin. VISA: Retrieval augmented generation with visual source attribution. arXiv:2412.14457, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.679, + 0.482, + 0.734 + ], + "angle": 0, + "content": "[39] Seiji Maekawa, Hayate Iso, Sairam Gurajada, and Nikita Bhutani. Retrieval helps or hurts? a deeper dive into the efficacy of retrieval augmentation to language models. In NAACL, pages 5506-5521, 2024. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.735, + 0.482, + 0.802 + ], + "angle": 0, + "content": "[40] Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In ACL, pages 9802-9822, 2023. 1, 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.803, + 0.482, + 0.859 + ], + "angle": 0, + "content": "[41] Ahmed Masry, Xuan Long Do, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. ChartQA: A benchmark for question answering about charts with visual and logical reasoning. In Findings of ACL, pages 2263-2279, 2022. 2, 3, 5, 6, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.86, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[42] Minesh Mathew, Dimosthenis Karatzas, and C. V. Jawahar. 
DocVQA: A dataset for vqa on document images. In WACV, pages 2200-2209, 2021. 1, 2, 5" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.093, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.135 + ], + "angle": 0, + "content": "[43] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and C.V. Jawahar. InfographicVQA. In WACV, pages 1697-1706, 2022. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.136, + 0.905, + 0.176 + ], + "angle": 0, + "content": "[44] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv:1807.03748, 2018. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.178, + 0.905, + 0.233 + ], + "angle": 0, + "content": "[45] Md Rizwan Parvez, Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. Retrieval augmented code generation and summarization. arXiv:2108.11601, 2021. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.234, + 0.905, + 0.303 + ], + "angle": 0, + "content": "[46] Le Qi, Shangwen Lv, Hongyu Li, Jing Liu, Yu Zhang, Qiaoqiao She, Hua Wu, Haifeng Wang, and Ting Liu. DuReadervis: A Chinese dataset for open-domain document visual question answering. In Findings of ACL, pages 1338-1351, 2022. 2, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.305, + 0.905, + 0.375 + ], + "angle": 0, + "content": "[47] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.376, + 0.905, + 0.444 + ], + "angle": 0, + "content": "[48] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. JMLR, 21(140):1-67, 2020. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.446, + 0.905, + 0.502 + ], + "angle": 0, + "content": "[49] Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. Incontext retrieval-augmented language models. TACL, pages 1316-1331, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.503, + 0.905, + 0.544 + ], + "angle": 0, + "content": "[50] Rita Ramos, Desmond Elliott, and Bruno Martins. Retrievalaugmented image captioning. In EACL, pages 3666-3681, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.546, + 0.905, + 0.601 + ], + "angle": 0, + "content": "[51] Rita Ramos, Bruno Martins, Desmond Elliott, and Yova Kementchedjhieva. Smallcap: lightweight image captioning prompted with retrieval augmentation. In CVPR, pages 2840-2849, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.602, + 0.905, + 0.645 + ], + "angle": 0, + "content": "[52] Stephen Robertson, Hugo Zaragoza, et al. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389, 2009. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.646, + 0.905, + 0.701 + ], + "angle": 0, + "content": "[53] Junyoung Seo, Susung Hong, Wooseok Jang, Ines Hyeonsu Kim, Minseop Kwak, Doyup Lee, and Seungryong Kim. Retrieval-augmented score distillation for text-to-3d generation. arXiv:2402.02972, 2024. 
2" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.702, + 0.905, + 0.731 + ], + "angle": 0, + "content": "[54] Ray Smith. An overview of the tesseractOCR engine. In ICDAR, pages 629-633, 2007. 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.732, + 0.905, + 0.8 + ], + "angle": 0, + "content": "[55] Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv:2210.09261, 2022. 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.802, + 0.905, + 0.844 + ], + "angle": 0, + "content": "[56] Ryota Tanaka, Kyosuke Nishida, and Sen Yoshida. VisualMRC: Machine reading comprehension on document images. In AAAI, pages 13878-13888, 2021. 1, 2, 3, 5" + }, + { + "type": "ref_text", + "bbox": [ + 0.517, + 0.845, + 0.905, + 0.901 + ], + "angle": 0, + "content": "[57] Ryota Tanaka, Kyosuke Nishida, Kosuke Nishida, Taku Hasegawa, Itsumi Saito, and Kuniko Saito. SlideVQA: A dataset for document visual question answering on multiple images. In AAAI, pages 13636-13645, 2023. 1, 2, 3, 5" + }, + { + "type": "list", + "bbox": [ + 0.517, + 0.093, + 0.905, + 0.901 + ], + "angle": 0, + "content": null + } + ], + [ + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.147 + ], + "angle": 0, + "content": "[58] Ryota Tanaka, Taichi Iki, Kyosuke Nishida, Kuniko Saito, and Jun Suzuki. Instructdoc: A dataset for zero-shot generalization of visual document understanding with instructions. In AAAI, pages 19071-19079, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.149, + 0.482, + 0.205 + ], + "angle": 0, + "content": "[59] Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. Text embeddings by weakly-supervised contrastive pretraining. arXiv:2212.03533, 2022. 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.207, + 0.482, + 0.261 + ], + "angle": 0, + "content": "[60] Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. Improving text embeddings with large language models. In ACL, pages 11897-11916, 2024. 5, 6, 3" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.264, + 0.482, + 0.305 + ], + "angle": 0, + "content": "[61] Jilan Xu, Yifei Huang, Junlin Hou, Guo Chen, Yuejie Zhang, Rui Feng, and Weidi Xie. Retrieval-augmented egocentric video captioning. In CVPR, pages 13525-13536, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.307, + 0.482, + 0.361 + ], + "angle": 0, + "content": "[62] Dongchao Yang, Songxiang Liu, Rongjie Huang, Chao Weng, and Helen Meng. Instructtts: Modelling expressive tts in discrete latent space with natural language style prompt. TASLP, pages 2913-2925, 2024. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.363, + 0.482, + 0.459 + ], + "angle": 0, + "content": "[63] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, Qianyu Chen, Huarong Zhou, Zhensheng Zou, Haoye Zhang, Shengding Hu, Zhi Zheng, Jie Zhou, Jie Cai, Xu Han, Guoyang Zeng, Dahai Li, Zhiyuan Liu, and Maosong Sun. Minicpm-v: A gpt-4v level mllm on your phone. arXiv:2408.01800, 2024. 
6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.461, + 0.482, + 0.53 + ], + "angle": 0, + "content": "[64] Michihiro Yasunaga, Armen Aghajanyan, Weijia Shi, Rich James, Jure Leskovec, Percy Liang, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. Retrieval-augmented multimodal language modeling. In ICML, pages 39755-39769, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.532, + 0.482, + 0.615 + ], + "angle": 0, + "content": "[65] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Guohai Xu, Chenliang Li, Junfeng Tian, Qi Qian, Ji Zhang, Qin Jin, Liang He, Xin Lin, and Fei Huang. UReader: Universal OCR-free visually-situated language understanding with multimodal large language model. In EMNLP Findings, pages 2841-2858, 2023. 4" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.617, + 0.482, + 0.685 + ], + "angle": 0, + "content": "[66] Shi Yu, Chaoyue Tang, Bokai Xu, Junbo Cui, Junhao Ran, Yukun Yan, Zhenghao Liu, Shuo Wang, Xu Han, Zhiyuan Liu, et al. VisRAG: Vision-based retrieval-augmented generation on multi-modality documents. arXiv:2410.10594, 2024. 2, 5, 6" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.687, + 0.482, + 0.729 + ], + "angle": 0, + "content": "[67] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, pages 11975-11986, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.731, + 0.482, + 0.772 + ], + "angle": 0, + "content": "[68] Liang Zhang, Anwen Hu, Jing Zhang, Shuo Hu, and Qin Jin. MPMQA: multimodal question answering on product manuals. In AAAI, pages 13958-13966, 2023. 2, 3, 5, 1" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.774, + 0.482, + 0.828 + ], + "angle": 0, + "content": "[69] Mingyuan Zhang, Xinying Guo, Liang Pan, Zhongang Cai, Fangzhou Hong, Huirong Li, Lei Yang, and Ziwei Liu. Remodiffuse: Retrieval-augmented motion diffusion model. In ICCV, pages 364-373, 2023. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.831, + 0.482, + 0.871 + ], + "angle": 0, + "content": "[70] Shuyan Zhou, Uri Alon, Frank F Xu, Zhiruo Wang, Zhengbao Jiang, and Graham Neubig. Docprompting: Generating code by retrieving the docs. arXiv:2207.05987, 2022. 2" + }, + { + "type": "ref_text", + "bbox": [ + 0.093, + 0.873, + 0.482, + 0.901 + ], + "angle": 0, + "content": "[71] Fengbin Zhu, Wenqiang Lei, Fuli Feng, Chao Wang, Haozhou Zhang, and Tat-Seng Chua. Towards complex doc" + }, + { + "type": "list", + "bbox": [ + 0.093, + 0.092, + 0.482, + 0.901 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.547, + 0.092, + 0.905, + 0.12 + ], + "angle": 0, + "content": "ument understanding by discrete reasoning. In ACMM, pages 4857-4866, 2022. 2" + } + ], + [ + { + "type": "header", + "bbox": [ + 0.1, + 0.089, + 0.133, + 0.117 + ], + "angle": 0, + "content": "#" + }, + { + "type": "header", + "bbox": [ + 0.134, + 0.1, + 0.898, + 0.121 + ], + "angle": 0, + "content": "VDocRAG: Retrieval-Augmented Generation over Visually-Rich Documents" + }, + { + "type": "title", + "bbox": [ + 0.382, + 0.132, + 0.615, + 0.153 + ], + "angle": 0, + "content": "Supplementary Material" + }, + { + "type": "table", + "bbox": [ + 0.127, + 0.167, + 0.449, + 0.41 + ], + "angle": 0, + "content": "
StatisticsNumber
Total Images206,267
Total Questions43,474
- Single-Hop Questions33,244 (76.5%)
- Multi-Hop Questions10,230 (23.5%)
- Extractive Answer19,797 (45.5%)
- Abstractive Answer23,677 (54.5%)
QA Source Datasets9
- Existing DocumentVQA Datasets7
- Existing TableQA Datasets1
- Our Newly Created Datasets1
Maximum Question Length58
Maximum Answer Length130
Average Question Length13.7
Average Answer Length3.7
" + }, + { + "type": "table_footnote", + "bbox": [ + 0.16, + 0.421, + 0.414, + 0.435 + ], + "angle": 0, + "content": "Table A. Main statistics in OpenDocVQA." + }, + { + "type": "image", + "bbox": [ + 0.575, + 0.167, + 0.847, + 0.275 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.636, + 0.277, + 0.785, + 0.289 + ], + "angle": 0, + "content": "(a) Word cloud of questions." + }, + { + "type": "image", + "bbox": [ + 0.574, + 0.289, + 0.846, + 0.397 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.638, + 0.399, + 0.782, + 0.41 + ], + "angle": 0, + "content": "(b) Word cloud of answers." + }, + { + "type": "image_caption", + "bbox": [ + 0.517, + 0.423, + 0.9, + 0.437 + ], + "angle": 0, + "content": "Figure A. Word cloud distributions of question and answer texts." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.464, + 0.304, + 0.481 + ], + "angle": 0, + "content": "A. OpenDocVQA Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.492, + 0.484, + 0.626 + ], + "angle": 0, + "content": "Dataset Statistics. The main statistics of OpenDocVQA are presented in Table A. There are two types of questions: single-hop (45.5%) and multi-hop (23.5%). Answers to questions are categorized as extractive (45.5%) and abstractive (54.5%) types. OpenDocVQA consists of nine open-domain DocumentVQA datasets, including a newly created MHDocVQA dataset to address multi-hop questions over multiple documents, and collected and filtered QA datasets as follows." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.629, + 0.483, + 0.659 + ], + "angle": 0, + "content": "- DocVQA [42] includes industry document images collected from the UCSF Industry Document Library." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.66, + 0.483, + 0.689 + ], + "angle": 0, + "content": "- InfoVQA [43] includes infographics downloaded from the Internet for the search query \"infographics\"." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.69, + 0.483, + 0.719 + ], + "angle": 0, + "content": "- VisualMRC [56] is a visual machine reading comprehension on webpage screenshot images." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.72, + 0.483, + 0.765 + ], + "angle": 0, + "content": "ChartQA [41] is a chart understanding dataset with human-written and machine-generated questions focusing on visual and logical reasoning." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.766, + 0.483, + 0.825 + ], + "angle": 0, + "content": "- OpenWikiTable [27] is an open-domain question answering over tables. We took screenshot images of the tables, converting them into images with complex text layouts to handle visually-rich table data." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.826, + 0.483, + 0.871 + ], + "angle": 0, + "content": "- DUDE [28] is a multi-page, multi-domain, and multi-industry QA dataset that requires processing long documents and understanding different types of documents." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.871, + 0.483, + 0.902 + ], + "angle": 0, + "content": "- MPMQA [68] requires comprehending multimodal content in an entire product manual and answering questions." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.629, + 0.483, + 0.902 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.587, + 0.451, + 0.86, + 0.68 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.537, + 0.69, + 0.88, + 0.705 + ], + "angle": 0, + "content": "Figure B. 
Distribution of first three words of the question." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.731, + 0.905, + 0.776 + ], + "angle": 0, + "content": "- SlideVQA [57] requires multi-hop reasoning over multiple slide images containing various text formats, layouts, and visual content such as plots and charts." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.777, + 0.905, + 0.853 + ], + "angle": 0, + "content": "Figure A presents word clouds of the most frequently appeared words in the question and answer texts, illustrating that OpenDocVQA covers a wide range of topics and words. This observation is further supported by Figure B, which is a sunburst of the first three words of the questions." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.871, + 0.905, + 0.902 + ], + "angle": 0, + "content": "Filtering DocumentVQA datasets. We applied the following five heuristic rules to automatically filter out likely" + } + ], + [ + { + "type": "code_caption", + "bbox": [ + 0.118, + 0.093, + 0.354, + 0.107 + ], + "angle": 0, + "content": "Multi-hop Question Generation Prompt" + }, + { + "type": "code", + "bbox": [ + 0.116, + 0.119, + 0.834, + 0.457 + ], + "angle": 0, + "content": "EXAMPLE1: \nquestion1: In which country is the GWP smallest? \nanswer1: Denmark \nquestion2: What is the staple diet of Denmark? \nanswer2: Fish, cheese \ncombined question: What is the staple diet of the country where the GWP is the smallest? \nEXAMPLE2: \nquestion1: To which League does Chicago Cubs belong? \nanswer1: MLB \nquestion2: What is the average MLB team value? \nanswer2: $1.5b \ncombined question: What is the average the league where Chicago Cubs belongs to team value? \nEXAMPLE3 \nquestion1: Which is the capital city of Germany? \nanswer1: Berlin \nquestion2: What year did Berlin host the OKFestival? \nanswer2: It's 2014. \ncombined question: What year did the capital city of Germany host the OKFestival? \nBased on the above 3 examples, provide a combined question for the following case, such that the answer to the combined question is the same as the answer2: \nquestion1: {single-hop question} \nanswer1: {single-hop answer} \nquestion2: {single-hop question} \nanswer2: {single-hop answer} \ncombined question:" + }, + { + "type": "code_caption", + "bbox": [ + 0.09, + 0.484, + 0.907, + 0.514 + ], + "angle": 0, + "content": "Table B. Multi-hop question generation prompt. “{single-hop question}” and “{single-hop answer}” are placeholders of two single-hop questions." + }, + { + "type": "code_caption", + "bbox": [ + 0.118, + 0.53, + 0.339, + 0.545 + ], + "angle": 0, + "content": "Multi-hop Question Filtering Prompt" + }, + { + "type": "code", + "bbox": [ + 0.116, + 0.557, + 0.801, + 0.643 + ], + "angle": 0, + "content": "question1: {single-hop question} \nanswer1: {single-hop answer} \nquestion2: {single-hop question} \nanswer2: {single-hop answer} \nBased on the questions and answers above, please answer the following question shortly. If the answer is not identified, the answer is 'None': {multi-hop question}" + }, + { + "type": "code_caption", + "bbox": [ + 0.09, + 0.671, + 0.908, + 0.701 + ], + "angle": 0, + "content": "Table C. Multi-hop question filtering prompt. “{single-hop question}” and “{single-hop answer}” are placeholders of two single-hop questions. “{multi-hop question}” denotes the generated multi-hop questions." 
+ }, + { + "type": "text", + "bbox": [ + 0.091, + 0.727, + 0.286, + 0.741 + ], + "angle": 0, + "content": "context-dependent questions:" + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.746, + 0.483, + 0.775 + ], + "angle": 0, + "content": "- The question has one or more demonstrative pronouns, including \"this\", \"these\", and \"those\"." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.776, + 0.483, + 0.806 + ], + "angle": 0, + "content": "- The question has one or more personal pronouns, including \"she\", \"he\", \"her\", \"his\", and \"him\"." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.806, + 0.483, + 0.836 + ], + "angle": 0, + "content": "- The question has one or more specific keywords, including \"the document\" and \"mention\"." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.837, + 0.483, + 0.852 + ], + "angle": 0, + "content": "- The question does not contain entities except for numbers." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.852, + 0.361, + 0.867 + ], + "angle": 0, + "content": "- The question is shorter than six words." + }, + { + "type": "list", + "bbox": [ + 0.091, + 0.746, + 0.483, + 0.867 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.871, + 0.483, + 0.902 + ], + "angle": 0, + "content": "Any samples matching at least one of these rules were removed from our dataset. After applying the rules, we" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.727, + 0.907, + 0.848 + ], + "angle": 0, + "content": "manually reviewed all the questions to ensure context-independence, guided by the instruction: \"When you see the question without a given document, can you find a unique document in the corpus to provide a unique answer?\" To validate our review, we randomly sampled 50 questions with their gold and top-5 retrieved documents (from VDocRetriever) and found no ambiguous cases, confirming the high quality of our process." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.871, + 0.907, + 0.902 + ], + "angle": 0, + "content": "Prompts for creating multi-hop questions. Table B shows the prompt for combining two single-hop questions" + } + ], + [ + { + "type": "table", + "bbox": [ + 0.19, + 0.089, + 0.808, + 0.242 + ], + "angle": 0, + "content": "
DatasetTask Description
DocVQAYou have to find an industry document that answers my question.
InfoVQAGiven a question, retrieve an infographic to answer the question.
VisualMRCI'm looking for a screenshot image that answers the question.
ChartQAGiven a user query, retrieve a chart image that answers the query.
OpenWikiTableGiven a user query, retrieve a table image for answering the question.
DUDEYou need to retrieve evidence from a PDF page to address the question.
MPMQAI want to know the answer to the question. Can you find evidence from manual pages?
SlideVQAGiven a question, retrieve a slide image to answer the question.
MHDocVQAGiven a multihop-question, retrieve multiple pages that can help answer the question.
" + }, + { + "type": "table_caption", + "bbox": [ + 0.324, + 0.253, + 0.673, + 0.266 + ], + "angle": 0, + "content": "Table D. Instructions in the visual document retrieval task." + }, + { + "type": "table", + "bbox": [ + 0.1, + 0.291, + 0.477, + 0.441 + ], + "angle": 0, + "content": "
ModelModel Checkpoint
Contrieverfacebook/contriever-msmarco
E5intfloat/e5-base-v2
GTEthenlper/gte-base
E5-Mistralintfloat/e5-mistral-7b-instruct
NV-Embed-v2nvidia/NV-Embed-v2
CLIPopenai/clip-vit-large-patch14-336
DSETevatron/dse-phi3-docmatix-v1
VisRAG-Retopenbmb/VisRAG-Ret
Phi3Vmicrosoft/Phi-3-vision-128k-instruct
Idefics3HuggingFaceM4/Idefics3-8B-Llama3
" + }, + { + "type": "table_caption", + "bbox": [ + 0.13, + 0.451, + 0.443, + 0.466 + ], + "angle": 0, + "content": "Table E. Model checkpoints stored on HuggingFace." + }, + { + "type": "table", + "bbox": [ + 0.183, + 0.478, + 0.395, + 0.62 + ], + "angle": 0, + "content": "
HyperparametersValue
Learning Rate1e-4
Gradient Accumulation4
Adam W β10.9
Adam W β20.999
LoRA Attention Dimension r8
LoRA Scaling Alpha64
LoRA Dropout0.1
LoRA Target*.proj
BF16True
" + }, + { + "type": "table_caption", + "bbox": [ + 0.098, + 0.63, + 0.475, + 0.644 + ], + "angle": 0, + "content": "Table F. Hyperparameters used for pre-training and fine-tuning." + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.67, + 0.483, + 0.703 + ], + "angle": 0, + "content": "to generate multi-hop questions. Moreover, Table C shows the prompt for filtering the generated multi-hop questions." + }, + { + "type": "title", + "bbox": [ + 0.091, + 0.714, + 0.296, + 0.731 + ], + "angle": 0, + "content": "B. Experimental Details" + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.74, + 0.483, + 0.8 + ], + "angle": 0, + "content": "Instruction templates. Following a standard LLM-based retrieval training and evaluation strategy [60], we applied natural language instruction templates to the original question for the visual document retrieval task:" + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.812, + 0.454, + 0.83 + ], + "angle": 0, + "content": "Instruct: {task description} \\n Query: {question}," + }, + { + "type": "text", + "bbox": [ + 0.09, + 0.84, + 0.484, + 0.903 + ], + "angle": 0, + "content": "where “{task description}” is a placeholder for a one-sentence task description as shown in Table D. Note that the instruction format was applied to only LLM-based retrievers, including E5-Mistral [60], NV-Embed-v2 [30]," + }, + { + "type": "table", + "bbox": [ + 0.518, + 0.291, + 0.905, + 0.373 + ], + "angle": 0, + "content": "
Max Image ResolutionRetrievalANLSQA Generation Time
nDCG@5Encoding Time
336×33628.785.037.2394.5
672×67272.8106.442.7490.9
1344×134472.9204.456.2789.7
" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.383, + 0.907, + 0.427 + ], + "angle": 0, + "content": "Table G. Impact of image resolution on InfoVQA under the single-pool setting. Average time (ms) to encode a single document or generate a single answer is measured on a single A100 GPU." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.452, + 0.909, + 0.59 + ], + "angle": 0, + "content": "DSE [37], Phi3 [1], and VDocRetriever. Our preliminary experiments observed that using the instruction during both training and evaluation improved the performance of LLM-based retrievers. However, applying the same instruction format to non-LLM-based retrievers, such as Contriever [22], resulted in a performance decline due to lacking instruction-following capabilities. Furthermore, we appended an instruction regarding the desired output format for the DocumentVQA task:" + }, + { + "type": "text", + "bbox": [ + 0.647, + 0.601, + 0.774, + 0.618 + ], + "angle": 0, + "content": "\\(\\backslash\\) n Answer briefly." + }, + { + "type": "text", + "bbox": [ + 0.513, + 0.636, + 0.907, + 0.669 + ], + "angle": 0, + "content": "Model checkpoints Table E shows model initialization checkpoints stored on HuggingFace1." + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.686, + 0.906, + 0.717 + ], + "angle": 0, + "content": "Model hyperparameters Table F lists hyperparameters in pre-training and fine-tuning used for our models." + }, + { + "type": "title", + "bbox": [ + 0.513, + 0.731, + 0.828, + 0.749 + ], + "angle": 0, + "content": "C. Additional Experimental Analysis" + }, + { + "type": "text", + "bbox": [ + 0.512, + 0.756, + 0.907, + 0.879 + ], + "angle": 0, + "content": "How does image resolution impact performance? Table G shows that increasing image resolution improved the model's capability to understand and encode the document; however, it also significantly increased the inference time for both retrieval and QA tasks. Moreover, the performance in the QA task exhibited greater sensitivity to image resolution compared to the retrieval task, indicating that the QA task demands more detailed visual understanding." + }, + { + "type": "page_footnote", + "bbox": [ + 0.532, + 0.887, + 0.716, + 0.901 + ], + "angle": 0, + "content": "1https://huggingface.co" + } + ], + [ + { + "type": "image", + "bbox": [ + 0.104, + 0.09, + 0.473, + 0.233 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.092, + 0.244, + 0.483, + 0.273 + ], + "angle": 0, + "content": "Figure C. QA performance with various top-k on InfoVQA under the single-pool setting. () denotes document sources." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.299, + 0.483, + 0.39 + ], + "angle": 0, + "content": "How many retrieved documents to augment? Figure C shows that incorporating three documents yielded the best results in VDocRAG. While adding a few documents may include helpful contexts, adding more low-ranked or randomly sampled documents introduces noise and deteriorates generation due to the imperfections of retrievers." + }, + { + "type": "text", + "bbox": [ + 0.091, + 0.409, + 0.483, + 0.514 + ], + "angle": 0, + "content": "Additional qualitative results. Figure D shows qualitative results of VDocRAG compared to text-based RAG. VDocRAG demonstrates significant performance advantages in understanding layouts and visual content, such as tables, charts, figures, and diagrams. These findings highlight the critical role of representing documents as images to improve the performance of the RAG framework." 
+ } + ], + [ + { + "type": "title", + "bbox": [ + 0.374, + 0.153, + 0.454, + 0.164 + ], + "angle": 0, + "content": "VDocRetriever" + }, + { + "type": "text", + "bbox": [ + 0.108, + 0.178, + 0.264, + 0.198 + ], + "angle": 0, + "content": "How many apps does the company which makes Clash of Clans make?" + }, + { + "type": "text", + "bbox": [ + 0.113, + 0.22, + 0.191, + 0.23 + ], + "angle": 0, + "content": "Ground-truth: 7" + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.239, + 0.209, + 0.249 + ], + "angle": 0, + "content": "Text-based RAG: 61" + }, + { + "type": "image", + "bbox": [ + 0.222, + 0.237, + 0.237, + 0.248 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.114, + 0.255, + 0.178, + 0.266 + ], + "angle": 0, + "content": "VDocRAG: 7" + }, + { + "type": "image", + "bbox": [ + 0.222, + 0.255, + 0.237, + 0.267 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.318, + 0.176, + 0.361, + 0.188 + ], + "angle": 0, + "content": "Top1" + }, + { + "type": "image", + "bbox": [ + 0.266, + 0.19, + 0.401, + 0.273 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.468, + 0.176, + 0.514, + 0.188 + ], + "angle": 0, + "content": "Top2" + }, + { + "type": "image", + "bbox": [ + 0.414, + 0.19, + 0.559, + 0.274 + ], + "angle": 0, + "content": null + }, + { + "type": "title", + "bbox": [ + 0.68, + 0.152, + 0.789, + 0.163 + ], + "angle": 0, + "content": "Text-based Retriever" + }, + { + "type": "text", + "bbox": [ + 0.641, + 0.176, + 0.663, + 0.188 + ], + "angle": 0, + "content": "Top1" + }, + { + "type": "image", + "bbox": [ + 0.668, + 0.176, + 0.682, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.591, + 0.189, + 0.728, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.791, + 0.176, + 0.813, + 0.188 + ], + "angle": 0, + "content": "Top2" + }, + { + "type": "image", + "bbox": [ + 0.816, + 0.176, + 0.836, + 0.188 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.766, + 0.196, + 0.865, + 0.207 + ], + "angle": 0, + "content": "Top Free iOS App Earners" + }, + { + "type": "image", + "bbox": [ + 0.767, + 0.208, + 0.889, + 0.272 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.111, + 0.307, + 0.25, + 0.338 + ], + "angle": 0, + "content": "What is the Stream Source for the API which uses Java, Scala, and Python?" 
+ }, + { + "type": "text", + "bbox": [ + 0.117, + 0.349, + 0.26, + 0.36 + ], + "angle": 0, + "content": "Ground-truth: HDFS, Network" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.368, + 0.221, + 0.379 + ], + "angle": 0, + "content": "Text-based RAG: Fink" + }, + { + "type": "image", + "bbox": [ + 0.242, + 0.368, + 0.26, + 0.379 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.385, + 0.245, + 0.395 + ], + "angle": 0, + "content": "VDocRAG: HDFS, Network" + }, + { + "type": "image_caption", + "bbox": [ + 0.329, + 0.302, + 0.372, + 0.314 + ], + "angle": 0, + "content": "Top1" + }, + { + "type": "image", + "bbox": [ + 0.266, + 0.318, + 0.419, + 0.408 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.485, + 0.302, + 0.529, + 0.314 + ], + "angle": 0, + "content": "Top2" + }, + { + "type": "image", + "bbox": [ + 0.42, + 0.317, + 0.569, + 0.409 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.649, + 0.301, + 0.672, + 0.312 + ], + "angle": 0, + "content": "Top1" + }, + { + "type": "image", + "bbox": [ + 0.674, + 0.301, + 0.693, + 0.312 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.589, + 0.315, + 0.734, + 0.405 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.799, + 0.301, + 0.822, + 0.312 + ], + "angle": 0, + "content": "Top2" + }, + { + "type": "text", + "bbox": [ + 0.766, + 0.322, + 0.869, + 0.33 + ], + "angle": 0, + "content": "The Reactive Streams Initiative" + }, + { + "type": "text", + "bbox": [ + 0.749, + 0.332, + 0.89, + 0.398 + ], + "angle": 0, + "content": "Reactive Streams is an initiative to provide a standard for asynchronous stream processing with non-blocking back pressure on the JVM \nProblem Scope \nHandling streams of (live) data in an asynchronous and possibly non-blocking way Finding a minimal API describing the operations available on Reactive Streams \nImplementers \nRxlava \nAkka Streams \nReactor Composable Ratpack" + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.444, + 0.243, + 0.465 + ], + "angle": 0, + "content": "Which is Microsoft's biggest acquisition to date?" 
+ }, + { + "type": "text", + "bbox": [ + 0.12, + 0.487, + 0.22, + 0.498 + ], + "angle": 0, + "content": "Ground-truth: Skype" + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.505, + 0.235, + 0.515 + ], + "angle": 0, + "content": "Text-based RAG: Oculus" + }, + { + "type": "image", + "bbox": [ + 0.237, + 0.505, + 0.254, + 0.516 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.121, + 0.522, + 0.207, + 0.534 + ], + "angle": 0, + "content": "VDocRAG: Skype" + }, + { + "type": "image", + "bbox": [ + 0.238, + 0.521, + 0.255, + 0.532 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.382, + 0.438, + 0.425, + 0.45 + ], + "angle": 0, + "content": "Top1" + }, + { + "type": "image", + "bbox": [ + 0.285, + 0.451, + 0.43, + 0.539 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.474, + 0.45, + 0.544, + 0.538 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.652, + 0.438, + 0.673, + 0.449 + ], + "angle": 0, + "content": "Top1" + }, + { + "type": "text", + "bbox": [ + 0.594, + 0.454, + 0.724, + 0.533 + ], + "angle": 0, + "content": "Increasing number of prominent successful exits\n• Oracle TRUE2014\n• Acquired (Prestige) IPO Acquired (Oracle)\n• Acquired (Open) IPO\n• Gravity baily inktank + CDO\n• Acquired (AOL) Acquired (Apple) Acquired (Red Hat) Acquired (Open) Acquired (Oracle)\n• $8B+ in 2014 so far with more to come\nupfront" + }, + { + "type": "image_caption", + "bbox": [ + 0.802, + 0.438, + 0.826, + 0.449 + ], + "angle": 0, + "content": "Top2" + }, + { + "type": "image", + "bbox": [ + 0.828, + 0.438, + 0.847, + 0.449 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.74, + 0.451, + 0.894, + 0.534 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.574, + 0.258, + 0.595 + ], + "angle": 0, + "content": "How many layers are used in the gloves for the DPE suit?" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.617, + 0.214, + 0.627 + ], + "angle": 0, + "content": "Ground-truth: Three" + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.635, + 0.221, + 0.646 + ], + "angle": 0, + "content": "Text-based RAG: Two" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.635, + 0.251, + 0.646 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.117, + 0.652, + 0.2, + 0.662 + ], + "angle": 0, + "content": "VDocRAG: Three" + }, + { + "type": "image", + "bbox": [ + 0.234, + 0.652, + 0.251, + 0.662 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.379, + 0.568, + 0.421, + 0.579 + ], + "angle": 0, + "content": "Top1" + }, + { + "type": "image", + "bbox": [ + 0.301, + 0.58, + 0.557, + 0.666 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.649, + 0.568, + 0.668, + 0.579 + ], + "angle": 0, + "content": "Top" + }, + { + "type": "image", + "bbox": [ + 0.586, + 0.58, + 0.732, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.799, + 0.568, + 0.819, + 0.579 + ], + "angle": 0, + "content": "Top2" + }, + { + "type": "image", + "bbox": [ + 0.741, + 0.58, + 0.892, + 0.672 + ], + "angle": 0, + "content": null + }, + { + "type": "text", + "bbox": [ + 0.116, + 0.709, + 0.224, + 0.729 + ], + "angle": 0, + "content": "What is the phase before full moon?" 
+ }, + { + "type": "text", + "bbox": [ + 0.119, + 0.748, + 0.264, + 0.759 + ], + "angle": 0, + "content": "Ground-truth: Waxing Gibbous" + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.766, + 0.244, + 0.777 + ], + "angle": 0, + "content": "Text-based RAG: New Mod" + }, + { + "type": "text", + "bbox": [ + 0.12, + 0.783, + 0.246, + 0.794 + ], + "angle": 0, + "content": "VDocRAG: Waxing Gibbous" + }, + { + "type": "image_caption", + "bbox": [ + 0.381, + 0.699, + 0.423, + 0.711 + ], + "angle": 0, + "content": "Top1" + }, + { + "type": "image", + "bbox": [ + 0.286, + 0.716, + 0.36, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image", + "bbox": [ + 0.37, + 0.716, + 0.564, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.651, + 0.699, + 0.697, + 0.711 + ], + "angle": 0, + "content": "Top1" + }, + { + "type": "image", + "bbox": [ + 0.585, + 0.716, + 0.726, + 0.793 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.801, + 0.699, + 0.823, + 0.71 + ], + "angle": 0, + "content": "Top2" + }, + { + "type": "image", + "bbox": [ + 0.732, + 0.717, + 0.807, + 0.797 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.81, + 0.698, + 0.848, + 0.71 + ], + "angle": 0, + "content": "op2" + }, + { + "type": "image", + "bbox": [ + 0.808, + 0.721, + 0.892, + 0.796 + ], + "angle": 0, + "content": null + }, + { + "type": "image_caption", + "bbox": [ + 0.248, + 0.826, + 0.749, + 0.841 + ], + "angle": 0, + "content": "Figure D. Additional qualitative results of VDocRAG compared to Text-based RAG." + } + ] +] \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_origin.pdf b/data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_origin.pdf new file mode 100644 index 0000000000000000000000000000000000000000..1f69523af94bc602bc28f5b8709b88c87d2c99e6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/9aa4a651-296e-45c5-b9c8-a1e5a1bcd6ae_origin.pdf @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d957575ceaa68f104871123c84811b292dc2a4d80525c8edc7501842c25f5aed +size 16597825 diff --git a/data/2025/2504_09xxx/2504.09795/full.md b/data/2025/2504_09xxx/2504.09795/full.md new file mode 100644 index 0000000000000000000000000000000000000000..2981f82e9350ee1d123368b079b29cd29efee2f3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/full.md @@ -0,0 +1,588 @@ +Ryota Tanaka $^{1,2}$ Taichi Iki $^{1}$ Taku Hasegawa $^{1}$ Kyosuke Nishida $^{1}$ Kuniko Saito $^{1}$ Jun Suzuki $^{2}$ $^{1}$ NTT Human Informatics Laboratories, NTT Corporation +https://vdocrag.github.io + +# Abstract + +We aim to develop a retrieval-augmented generation (RAG) framework that answers questions over a corpus of visually-rich documents presented in mixed modalities (e.g., charts, tables) and diverse formats (e.g., PDF, PPTX). In this paper, we introduce a new RAG framework, VDocRAG, which can directly understand varied documents and modalities in a unified image format to prevent missing information that occurs by parsing documents to obtain text. To improve the performance, we propose novel self-supervised pre-training tasks that adapt large vision-language models for retrieval by compressing visual information into dense token representations while aligning them with textual content in documents. 
Furthermore, we introduce OpenDocVQA, the first unified collection of open-domain document visual question answering datasets, encompassing diverse document types and formats. OpenDocVQA provides a comprehensive resource for training and evaluating retrieval and question answering models on visually-rich documents in an open-domain setting. Experiments show that VDocRAG substantially outperforms conventional text-based RAG and has strong generalization capability, highlighting the potential of an effective RAG paradigm for real-world documents. + +# 1. Introduction + +Large language models (LLMs) have demonstrated impressive performance on diverse natural language tasks [2, 16, 24, 55]. These models struggle with factual errors despite their increased model and data scale [39, 40]. To remedy this problem, retrieval-augmented generation (RAG) methods [18, 31] can retrieve knowledge from an external corpus, potentially reducing hallucination and increasing knowledge coverage. Most previous RAG frameworks assume the context is composed entirely of text, with no graphical elements. In contrast, a significant amount of real-world information is stored in visually-rich documents, such as charts, tables, web pages, and office documents. These documents often contain both textual and visual objects, with content spread structurally across various loca + +![](images/dbae51a8466af1104918b799e875239d66dc9bedd9befb480bbfb0a5a8186151.jpg) +Figure 1. Our framework of VDocRAG and examples from OpenDocVQA. VDocRAG consists of VDocRetirver and VDocGenerator, which can retrieve relevant documents and generate answers by understanding the original appearance of documents. + +tions depending on diverse formats and types. + +Thus, document visual question answering (DocumentVQA) [42, 43, 56, 57] aims to build an agent capable of reading and comprehending document images to answer the question. Here, most existing DocumentVQA questions operate in a closed setting without requiring any retrieval. While this definition simplifies the QA model, it does not reflect many real-world use cases where the question is asked through some open-domain natural language interface, such as QA systems searching information across in-house documents or customer service chatbots on e-commerce websites. To address this limitation, recent works have introduced retrieval tasks on document images [17, 37]. However, these cannot fully develop models that effectively integrate the retrieved information into the final output. This gap hinders the application of DocumentVQA models in more realistic, open-domain scenarios. + +In this paper, we introduce a new RAG framework, VDocRAG, which can directly understand varied docu + +ments and modalities in a unified image format to avoid tedious parsing and potential information loss that occurs in conventional text-based RAG. As depicted in Figure 1, VDocRAG consists of two main components, both of which effectively leverage the visual features of documents. First, VDocRetriever retrieves document images related to the question from a corpus of document images. Second, VDocGenerator uses these retrieved images to generate the answer. To encode document images and interact with the encoded information, we adapt pre-trained large vision language models (LVLMs) [1, 29] as the backbone for VDocRAG. 
Since LVLMs are inherently generative models, it is sub-optimal for embeddings as they prevent the representations from capturing information across the entire input sequence due to the training objective (i.e., next-token prediction). To bridge this gap, we introduce new self-supervised pre-training tasks that harness the understanding and generation capabilities of LVLMs to enhance representation learning. Specifically, we compress the entire image representation into a dense token representation, by aligning the text in documents via retrieval and generation tasks. + +Furthermore, we introduce OpenDocVQA, the first unified collection of open-domain DocumentVQA datasets encompassing a wide range of document types and formats. OpenDocVQA provides a comprehensive resource for training and evaluating retrieval and question answering models on visually-rich documents in an open-domain setting. Experiments demonstrate that VDocRAG substantially outperforms conventional text-based RAG and has strong generalization performance. + +Our main contributions are summarized as follows: + +- We introduce a new RAG framework, VDocRAG, which can directly understand diverse real-world documents purely from visual features. +- We are the first to explore pre-training tasks designed for document retrieval-oriented adaptation of LVLMs, by compressing visual document representations. +- We introduce OpenDocVQA, the first unified open-domain DocumentVQA dataset with diverse documents. + +# 2. Related Work + +Retrieval-augmented generation (RAG). RAG in the NLP community aims at retrieving external knowledge to reduce factual errors and enhance performance in various knowledge-intensive tasks [3, 5, 39, 40, 49]. Inspired by the success of RAG in NLP, this technique has also applied applications across different domains, including images [8, 50, 51, 64], codes [45, 70], videos [7, 61], audio [26, 62], and 3D [53, 69]. However, most existing works have focused on retrieving knowledge from only plain-text documents or non-text media. In contrast, we tackle the challenge of extracting knowledge from visually-rich documents organized in complex, multimodal formats. + +Visual document retrieval and visual RAG. With the success of LLMs, there is a growing trend to build large vision language models (LVLMs) that integrate image understanding capabilities by combining image encoders [32, 48, 67] with LLMs [1, 10, 29, 33, 35, 58]. Concurrent works in visual document retrieval [13, 17, 37] and visual RAG [9, 38, 66] leverage LVLMs to directly encode visually-rich documents through images. However, these approaches have trouble understanding diverse real-world documents due to the limitations of their datasets and training strategies. The existing visual document retrieval dataset, ViDoRe [37], contains questions that might not require retrieval and handles a limited number of document types, resulting in a gap between real-world scenarios. In contrast, our dataset covers open document types and provides questions that are verified by humans to require retrieval and to have context-independent conditions for the retrieval. From the perspective of training, despite the significant gap between generative pre-training tasks and retrieval tasks in LVLMs, previous works [9, 17, 37, 38, 66] leverage LVLMs without specific training for bridging the gap. To address this, we introduce pre-training tasks that transfer the understanding and generation capabilities of LVLMs to retrievers. 
+ +Document visual question answering (DocumentVQA). DocumentVQA is a high-level document understanding task that involves answering questions on visually-rich documents. These documents include a variety of elements, such as handwritten and digital text [42, 56], complex layouts [28, 68, 71], and graphical elements [41, 43, 57]. However, previous studies have assumed closed settings that do not require retrieval, except for Dureader_vis [46]. Our work differs from Dureader_vis as follows. First, OpenDocVQA covers a wide range of document formats and domains, while Dureader_vis focuses on screenshots of websites, limiting its generalizability. Second, OpenDocVQA reflects more real-world scenarios that require both single- and multi-hop reasoning over documents, while Dureader_vis requires only single-hop reasoning. Lastly, even lexical search methods yield sufficient performance in Dureader_vis due to its reliance on textual content. In contrast, OpenDocVQA requires a visual semantic search where visual and contextual information can be exploited. + +# 3. OpenDocVQA Task and Dataset + +# 3.1. Task Formulation + +Given a large collection of $N$ document images $\mathcal{I} = \{I_1,\dots,I_N\}$ and a question $Q$ , the goal of OpenDocVQA task is to output an answer $A$ by finding the relevant $k$ images $\hat{\mathcal{I}}\in \mathcal{I}$ , where $k\ll N$ . We decompose the task into two stages. Visual document retrieval: given $Q$ and $\mathcal{I}$ , + +![](images/f629faeeb9a32204f44c19802ec06137e3e8e181cebc80a8db81f829d802e5c6.jpg) +Figure 2. Process of creating multi-hop DocumentVQA questions. + +the model retrieves the relevant $k$ images $\hat{\mathcal{I}}$ from which to derive the answer. DocumentVQA: the model takes $Q$ and the retrieved images $\hat{\mathcal{I}}$ as input, to generate $A$ . + +OpenDocVQA covers multiple open-domain DocumentVQA datasets with diverse document types. To reflect real-world scenarios, we evaluate models with both single-pool and all-pool settings. In the single-pool setting, retrieval is performed from a specific pool of documents provided by each original dataset. The all-pool setting requires retrieving from the entire candidate pool, which includes documents from a wide range of domains. + +# 3.2. Dataset Collection + +Filtering of DocumentVQA datasets. We collected and filtered instances of seven existing document VQA datasets [28, 41-43, 56, 57, 68]. Most of their questions are context-dependent conditions, where they cannot be answered without referencing the accompanying document (e.g., What is the title?). Therefore, we filtered out questions lacking sufficient context for retrieval. To address this, we initially applied heuristic rules to automatically select likely context-independent questions, reducing the pool by $20.9\%$ . Then, we manually reviewed and verified the remaining examples to ensure their context independence. + +Reformulation of TableQA dataset. We used QA pairs from Open-WikiTable [27], an open-domain TableQA dataset that required retrieving tables from Wikipedia to answer the question. Since the original dataset provides tables in only textual format (HTML data), we took the screenshot images of tables from the corresponding Wikipedia pages to reformulate the task as the OpenDocVQA. + +Creation of new multi-hop questions. 
To enhance the model's ability to interact with multiple document sources (e.g., charts and tables), we semi-automatically created a multi-hop DocumentVQA dataset, MHDocVQA, using the single-hop QA pairs collected in the previous steps. As shown in Figure 2, the creating process involved the following steps: (1) We first used spaCy [19] to identify a bridge + +
| | ViDoRe [17] | Dureader_vis [46] | OpenDocVQA |
| --- | --- | --- | --- |
| Retrieval | | | |
| QA | | | |
| Context-Independent | | | |
| Visual Semantic Search | | | |
| Multi-Hop | | | |
| Document Contents | T, L, F, C, D | T, L | T, L, F, C, D |
| Answer Types | - | Ext | Ext, Abs |
| #Document Types | 6 | 1 | Open |
| #QAs | 3,810 | 15,000 | 43,474 |
| #Images (Pages) | 8,310 | 158,000 | 206,267 |
+ +Table 1. Comparison of related datasets. Document contents include (T)able, (L)ist, (F)igure, (C)hart, and (D)iagram. Answer types are Extractive (Ext) and Abstractive (Abs). + +entity (e.g., Denmark) in the answer to a single-hop question and then searched for this entity in other single-hop questions. (2) Next, we used Mixtral-8x22B [24] to combine the two single-hop questions. (3) We filtered the generated multi-hop questions using another LLM (GPT-4o [2]), which answered the questions based on the context of the two initial single-hop questions and their answers. If the predicted answer was the same as the answer to the second single-hop question, the multi-hop question was validated. Finally, we manually reviewed the filtered questions to ensure their quality before including them in our dataset. + +Negative candidates mining. We produced negative image candidates for retrievers to sift through for every question, used only during inference. We first extracted OCR text from images in the COYO-700M dataset [6], a web-scaled image collection. Subsequently, we mined negative images where the OCR text exhibits high lexical overlap with the question but does not contain the correct answer. + +# 3.3. Comparison with Related Datasets + +Table 1 shows the statistics of OpenDocVQA and other related datasets, including ViDoRe [17] and Dureader_vis [46]. OpenDocVQA has three unique key properties: First, it is the first large-scale collection of open-domain DocumentVQA datasets to address open document types, whereas ViDoRe considers six document types for only the retrieval task and Dureader_vis is limited to webpages. Second, the questions in OpenDocVQA are context-independent and require visual semantic search, whereas ViDoRe's questions are context-dependent, and even lexical search methods yield sufficient performance in Dureader_vis. This indicates our dataset better reflects real-world scenarios. Lastly, unlike ViDoRe and Dureader_vis, OpenDocVQA requires multi-hop reasoning with extractive (e.g., span, list) and abstractive (e.g., arithmetic, counting, no answer) answer types, providing a more challenging setting. + +![](images/621d716df05ee2f430077f5046584325bfcf604ee584d3e4693b631080d1dc7e.jpg) +Figure 3. Overview of our VDocRAG model. VDocRetriever retrieves document images related to the question from a corpus of document images, and VDocGenerator uses these retrieved images to generate the answer. + +![](images/fe65f01872bf00c487f537d7f1b317c96711c617b5febf57e0c36686929945c1.jpg) + +# 4. Proposed Model + +# 4.1. Architecture Overview + +As shown in Figure 3, VDocRAG consists of two components: VDocRetriever and VDocGenerator. Our approach adopts the pre-trained LVLMs to unify the varied formats and modalities in a single form as an image for direct document understanding. + +Dynamic high-resolution image encoding. To encode high-resolution images with various aspect ratios, a dynamic cropping [14, 65] is utilized to split the image into smaller patches while maintaining the integrity of the original aspect ratio. Each patch is a small image with $336 \times 336$ size, and we treat them as individual inputs for the image encoder. After encoding images, we convert them via a projector (two-layer MLP) into visual document features $\mathbf{z}_{\mathrm{d}}$ . + +VDocRetriever. VDocRetriever is an LVLM-based dual-encoder architecture that encodes queries and document images independently. 
We append an $<\mathrm{EOS}>$ token to the end of the question and visual document features $\mathbf{z}_{\mathrm{d}}$ , and then feed them into the LLM to obtain the question and visual document embeddings $(\mathbf{h}_{\mathrm{q}}, \mathbf{h}_{\mathrm{d}})$ by taking the last layer $<\mathrm{EOS}>$ vector. Then, it retrieves $k$ documents $\hat{\mathcal{I}}$ with the $k$ highest similarity scores to the question. Formally, the similarity scores between the question and visual document embeddings are computed via maximum inner product search [15], as follows: $\mathrm{SIM}(\mathbf{h}_{\mathrm{q}}, \mathbf{h}_{\mathrm{d}}) = \frac{\mathbf{h}_{\mathrm{q}}^{\top} \mathbf{h}_{\mathrm{d}}}{\|\mathbf{h}_{\mathrm{q}}\| \|\mathbf{h}_{\mathrm{d}}\|}$ . + +VDocGenerator. VDocGenerator adapts LVLM to generate answers $A$ given the question $Q$ and the retrieved $k$ documents $\hat{\mathcal{I}}$ obtained from VDocRetriever. After encoding the retrieval result, we concatenate the question and the encoded result, then feed this combined input into the LLM. + +# 4.2. Self-Supervised Pre-training Tasks + +Figure 4a and 4b show our pre-taining tasks in VDocRetriever. The goal of pre-training is to transfer the powerful understanding and generation abilities of LVLMs to facilitate their usage in visual document retrieval. To this end, we propose two new self-supervised pre-training tasks to compress the entire image representation into the token at the end of the input image. Our pre-training process passes the document image, and its extracted OCR text is used as a pseudo target. Full pre-training objectives is defined as $\mathcal{L} = \mathcal{L}_{\mathrm{RCR}} + \mathcal{L}_{\mathrm{RCG}}$ . + +Representation Compression via Retrieval (RCR). We compress image representations with a contrastive learning task that retrieves images relevant to their corresponding OCR text, by leveraging LVLM's image understanding capabilities. As shown in Figure 4a, we first construct positive OCR text-image pairs $(\mathbf{h}_0,\mathbf{h}_{\mathrm{d}^+})$ from raw unlabeled document images. Then, we adopt in-batch negatives to calculate the contrastive loss by InfoNCE [44] as follows: + +$$ +\mathcal {L} _ {\mathrm {R C R}} = - \log \frac {\exp \left(\operatorname {S I M} \left(\mathbf {h} _ {\mathrm {o}}, \mathbf {h} _ {\mathrm {d} ^ {+}}\right) / \tau\right)}{\sum_ {i \in \mathcal {B}} \exp \left(\operatorname {S I M} \left(\mathbf {h} _ {\mathrm {o}}, \mathbf {h} _ {\mathrm {d} _ {i}}\right) / \tau\right)}, \tag {1} +$$ + +where $\tau$ is a temperature hyperparameter to scale the logits, and $\mathcal{B}$ represents the batch size. + +Representation Compression via Generation (RCG). We propose a representation training strategy that leverages the generative capabilities of LVLMs through a customized attention mask matrix. As depicted in Figure 4b, representations for the image tokens, including the token, are obtained via a standard auto-regressive process. In contrast, for the subsequent L OCR token representations, we mask the image token representations and allow only the attention of token and the preceding OCR tokens. 
This approach facilitates pooling the image representations + +![](images/dafe7f71f690b5d09e728783c8143871a4e73e05a3f58810e902020a4d0ef7af.jpg) +Trainable + +![](images/5f6a79669dc7ecbbfe97764c217ad7216d08cf0faab4ea84eb343c07e9058ad6.jpg) + +![](images/9f5bf16af03ad103a698c0321a9b58c638532434e40ee551f0fd81b372907c97.jpg) +(a) Representation Compression via Retrieval (RCR) + +![](images/9ba945bae9646b0a77bd959c1908abe2702a7b534271c0c4831d92704d0e3d4d.jpg) +Self-Supervised Pre-training +(b) Representation Compression via Generation (RCG) + +![](images/fb412815b242fc2172d8d550dc1d382145390815e4d89360f0f95a5919c3c6c0.jpg) +Supervised Fine-tuning +(c) Visual Document Retrieval +Figure 4. Our pre-training tasks using unlabeled documents and fine-tuning in VDocRetriever. The RCR task retrieves relevant images given corresponding OCR tokens, and the RCG task outputs OCR tokens by paying attention to only the token. + +
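For concreteness, the similarity function SIM and the in-batch InfoNCE objective of Eq. (1) can be written in a few lines of PyTorch. The sketch below is illustrative rather than the released implementation: the tensor names and helper functions are ours, and the only value taken from the paper is the temperature τ = 0.01 reported in the implementation details. The same contrastive form is reused for the RCR pre-training pairs and for the supervised query-document fine-tuning of Sec. 4.3, and `retrieve_top_k` mirrors the top-k similarity search step of VDocRetriever on normalized embeddings.

```python
import torch
import torch.nn.functional as F

def cosine_sim(h_q: torch.Tensor, h_d: torch.Tensor) -> torch.Tensor:
    """SIM(h_q, h_d): cosine similarity between question and document embeddings."""
    return F.normalize(h_q, dim=-1) @ F.normalize(h_d, dim=-1).T

def retrieve_top_k(h_q: torch.Tensor, doc_embs: torch.Tensor, k: int = 3) -> torch.Tensor:
    """Return indices of the k documents with the highest similarity to one question."""
    scores = cosine_sim(h_q.unsqueeze(0), doc_embs).squeeze(0)  # (N,)
    return scores.topk(k).indices

def info_nce_loss(h_anchor: torch.Tensor, h_pos: torch.Tensor, tau: float = 0.01) -> torch.Tensor:
    """Eq. (1): in-batch InfoNCE; row i of h_pos is the positive for row i of h_anchor."""
    logits = cosine_sim(h_anchor, h_pos) / tau                       # (B, B)
    labels = torch.arange(h_anchor.size(0), device=h_anchor.device)  # diagonal = positives
    return F.cross_entropy(logits, labels)
```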
| Dataset | Documents | %Filtered | #Images | #Train&Dev | #Test |
| --- | --- | --- | --- | --- | --- |
| DocVQA [42] | Industry | 84.8 | 12,767 | 6,382 | - |
| InfoVQA [43] | Infographic | 61.2 | 5,485 | 9,592 | 1,048 |
| VisualMRC [56] | Webpage | 71.9 | 10,229 | 6,126 | - |
| ChartQA [41] | Chart | 94.0 | 20,882 | - | 150 |
| OpenWikiTable [27] | Table | 0.0 | 1,257 | 4,261 | - |
| DUDE [28] | Open | 92.3 | 27,955 | 2,135 | 496 |
| MPMQA [68] | Manual | 81.7 | 10,018 | 3,054 | - |
| SlideVQA [57]§ | Slide | 66.7 | 52,380 | - | 760 |
| MHDocVQA§ | Open | 9.5 | 28,550 | 9,470 | - |
+ +Table 2. Datasets in OpenDocVQA. $\S$ denotes datasets requiring multi-hop reasoning. Note that MHDocVQA was created using only the training datasets. + +into $<\mathsf{EOS}>$ token. The loss function is defined as: + +$$ +\mathcal {L} _ {\mathrm {R C G}} = - \frac {1}{L} \sum_ {i = 1} ^ {L} \log p \left(y _ {i} \mid y _ {< i}, < \mathrm {E O S} >\right), \tag {2} +$$ + +where $y_{i}$ denotes the $i$ -th token of the OCR. + +# 4.3. Supervised Fine-tuning + +We first fine-tune the VDocRetriever with the contrastive learning objective using query-document pairs with in-batch negatives (see Figure 4c). Then, we apply the trained VDocRetriever to search over the corpus $\mathcal{I}$ to feed the top-k documents into the VDocGenerator. Finally, we train the VDocGenerator using the next-token prediction objective. + +# 5. Experiments + +# 5.1. Experimental Setup + +Pre-training dataset. For pre-training, we gathered 500k samples containing document image and OCR text pairs filtered from the DocStruct4M [20]. We excluded any images that appeared in the test set to avoid data contamination. + +Fine-tuning and evaluation datasets. We evaluated our models in both zero-shot and supervised settings. The zero-shot evaluation assessed the models' generalization capabilities on unseen datasets, while the supervised evaluation measured performance when training samples were available. As shown in Table 2, we trained our models on seven datasets and evaluated them on four datasets, including ChartQA and SlideVQA in the zero-shot setting, and InfoVQA and DUDE in the supervised setting. + +Implementation details. We initialized VDocRAG with Phi3V [1], a state-of-the-art LVLM trained on high-resolution images and multi-image data. The parameters of VDocRetriever and VDocGenerator were not shared. We employed LoRA [21] with LLM while keeping other parameters frozen during training. We trained VDocRAG for one epoch on eight A100-80G GPUs with AdamW [36] optimizer and FlashAttention [11], using batch sizes of 16 for pre-training and 64 for fine-tuning. We set the temperature $\tau$ to 0.01. We applied Tesseract [54] to extract OCR text in images. By default, we used the top three documents obtained from VDocRetirver. + +Retrieval baselines. We compared VDocRetriever with two categories of retrievers. The first category includes off-the-shelf text retrieval models on extracted text and image retrieval models. These consist of BM25 [52], a lexical matching model; Contriver [22], E5 [59], and GTE [34], which are popular strong text embedding models based on BERT [12]; E5-Mistral [60] and NV-Embedv2 [30], which are state-of-the-art LLM-based embedding models; CLIP [47], a dual-encoder vision-language model; DSE [37] and VisRAG-Ret [66], which are state-of-the-art visual document retrieval models. The second category includes fine-tuned models trained on OpenDocVQA. To + +
| Model | Init | Docs | Scale | #PT | #FT | ChartQA (Single) | ChartQA (All) | SlideVQA (Single) | SlideVQA (All) | InfoVQA (Single) | InfoVQA (All) | DUDE (Single) | DUDE (All) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| **Off-the-shelf** | | | | | | | | | | | | | |
| BM25 [52] | - | Text | 0 | 0 | 0 | 54.8 | 15.6 | 40.7 | 38.7 | 50.2 | 31.3 | 57.2 | 47.5 |
| Contriever [22] | BERT [12] | Text | 110M | 1B | 500K | 66.9 | 59.3 | 50.8 | 46.5 | 42.5 | 21.0 | 40.6 | 29.7 |
| E5 [59] | BERT [12] | Text | 110M | 270M | 1M | 74.9 | 66.3 | 53.6 | 49.6 | 49.2 | 26.9 | 45.0 | 38.9 |
| GTE [34] | BERT [12] | Text | 110M | 788M | 3M | 72.8 | 64.7 | 55.4 | 49.1 | 51.3 | 32.5 | 42.4 | 36.0 |
| E5-Mistral [60] | Mistral [23] | Text | 7.1B | 0 | 1.85M | 72.3 | 70.0 | 63.8 | 57.6 | 60.3 | 33.9 | 52.2 | 45.2 |
| NV-Embed-v2 [30] | Mistral [23] | Text | 7.9B | 0 | 2.46M | 75.3 | 70.7 | 61.7 | 58.1 | 56.5 | 34.2 | 43.0 | 38.6 |
| CLIP [47] | Scratch | Image | 428M | 400M | 0 | 54.6 | 38.6 | 38.1 | 29.7 | 45.3 | 20.6 | 23.2 | 17.6 |
| DSE [37] | Phi3V [1] | Image | 4.2B | 0 | 5.61M | 72.7 | 68.5 | 73.0 | 67.2 | 67.4 | 49.6 | 55.5 | 47.7 |
| VisRAG-Ret [66] | MiniCPM-V [63] | Image | 3.4B | 0 | 240K | 87.2* | 75.5* | 74.3* | 68.4* | 71.9* | 51.7* | 56.4 | 44.5 |
| **Trained on OpenDocVQA** | | | | | | | | | | | | | |
| Phi3 [1] | Phi3V [1] | Text | 4B | 0 | 41K | 72.5 | 65.3 | 53.3 | 48.4 | 53.2* | 33.0* | 40.5* | 32.0* |
| VDocRetriever† | Phi3V [1] | Image | 4.2B | 0 | 41K | 84.2 (+11.7) | 74.8 (+9.5) | 71.0 (+17.7) | 65.1 (+16.7) | 66.8* (+13.6) | 52.8* (+19.8) | 48.4* (+7.9) | 41.0* (+9.0) |
| VDocRetriever | Phi3V [1] | Image | 4.2B | 500K | 41K | 86.0 (+1.8) | 76.4 (+1.6) | 77.3 (+6.3) | 73.3 (+8.2) | 72.9* (+6.1) | 55.5* (+2.7) | 57.7* (+9.3) | 50.9* (+9.9) |
+ +Table 3. Retrieval results under the single- (Single) and all-pool (All) settings. * indicates performance on test data for which corresponding training samples are available. All other results represent zero-shot performance. Init, FT, and PT denote the initialization model, finetuning, and pre-training, respectively. Performance gains in green and blue are compared to the base LLM and VDocRetirver†, respectively. + +
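Table 3 reports nDCG@5, the retrieval metric described in Sec. 5.1. As a concrete reference, a minimal binary-relevance implementation of nDCG@k is sketched below; it is an illustration of the standard formula, not the evaluation script used for the table (which presumably relies on a standard IR toolkit).

```python
import math

def ndcg_at_k(ranked_doc_ids, relevant_ids, k=5):
    """Binary-relevance nDCG@k: DCG of the top-k ranking divided by the ideal DCG."""
    rels = [1.0 if doc_id in relevant_ids else 0.0 for doc_id in ranked_doc_ids[:k]]
    dcg = sum(rel / math.log2(i + 2) for i, rel in enumerate(rels))
    ideal = sorted([1.0] * min(len(relevant_ids), k) + [0.0] * k, reverse=True)[:k]
    idcg = sum(rel / math.log2(i + 2) for i, rel in enumerate(ideal))
    return dcg / idcg if idcg > 0 else 0.0

# Example: the single relevant document is ranked third, so nDCG@5 = 0.5.
print(ndcg_at_k(["d3", "d7", "d1"], {"d1"}, k=5))
```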
| Generator | Retriever | Docs | ChartQA (Single) | ChartQA (All) | SlideVQA (Single) | SlideVQA (All) | InfoVQA (Single) | InfoVQA (All) | DUDE (Single) | DUDE (All) |
| --- | --- | --- | --- | --- | --- | --- | --- | --- | --- | --- |
| **Closed-book** | | | | | | | | | | |
| Phi3 | - | - | 20.0 | 20.0 | 20.3 | 20.3 | 34.9* | 34.9* | 23.1* | 23.1* |
| **Text-based RAG** | | | | | | | | | | |
| Phi3 | Phi3 | Text | 28.0 | 28.0 | 28.6 | 28.0 | 40.5* | 39.1* | 40.1* | 35.7* |
| Phi3 | Gold | Text | 36.6 | 36.6 | 27.8 | 27.8 | 45.6* | 45.6* | 55.9* | 55.9* |
| **VDocRAG (Ours)** | | | | | | | | | | |
| VDocGenerator | VDocRetriever | Image | 52.0 (+24.0) | 48.0 (+20.0) | 44.2 (+15.6) | 42.0 (+14.0) | 56.2* (+15.7) | 49.2* (+10.1) | 48.5* (+8.4) | 44.0* (+8.3) |
| VDocGenerator | Gold | Image | 74.0 | 74.0 | 56.4 | 56.4 | 64.6* | 64.6* | 66.4* | 66.4* |
+ +Table 4. DocumentVQA results. All models are fine-tuned on OpenDocVQA. The results marked with * denote performance on unseen test samples, and the other results represent zero-shot performance. The performance gain in green is compared to the text-based RAG that has the same base LLM. Gold knows the ground-truth documents. Models answer the question based on the top three retrieval results. + +verify the effectiveness of encoding documents through images, we fine-tuned the LLM in VDocRetriever (Phi3 [1]) using extracted text to represent documents. Additionally, we included a variant of VDocRetriever without pretraining (VDocRetriever†). + +QA baselines. We compared VDocRAG against closed-book and text-based RAG models. These baselines used the same model initialization as VDocRAG but fine-tuned only the LLM (Phi3). The closed-book model received only the question as input, while the text-based RAG used the top three documents retrieved by the Phi3 retriever. Moreover, we assessed possible upper-bound performance by testing generation with ground-truth (Gold) documents. + +Evaluation metrics. We evaluated retrieval performance using nDCG@5, a widely used metric in information retrieval [17, 25]. For the DocumentVQA task, we followed the evaluation protocol of each dataset, we used ANLS [4] for InfoVQA and DUDE, Relaxed Accuracy [41] for + +ChartQA, F1 for SlideVQA as evaluation metrics. + +# 5.2. Retrieval Results + +Table 3 shows that VDocRetriever† achieved significantly higher retrieval performance than the text-based Phi3 retriever on all datasets under the same conditions. This indicates that our model can effectively encode documents in image format for retrieval tasks. Furthermore, VDocRetriever exhibits superior zero-shot generalization on unseen datasets, ChartQA and SlideVQA, outperforming both off-the-shelf text retrievers and state-of-the-art visual document retrieval models. Notably, DSE was initialized with the same LVLM as ours and fine-tuned on 13.7 times more data. This highlights that our pre-training strategy and the OpenDocVQA dataset offer unique advantages that are not adequately addressed by existing approaches. + +# 5.3. Retrieval-Augmented Generation Results + +Table 4 shows that VDocRAG significantly outperformed both the closed-book LLM and the text-based RAG on + +
| Model | SlideVQA | InfoVQA |
| --- | --- | --- |
| VDocRetriever | 77.3 | 72.9 |
| w/o RCR | 75.9 (-1.4) | 71.1 (-1.8) |
| w/o RCG | 71.7 (-5.6) | 68.8 (-4.1) |
| w/o RCG & RCR | 71.0 (-6.3) | 66.8 (-6.1) |
| w/o LLM & Projector (→ CLIP encoders) | 43.7 (-33.6) | 37.9 (-35.0) |
+ +Table 5. Ablation study of our pre-training tasks and model architecture in the retrieval task under the single-pool setting. + +
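Table 5 ablates the two pre-training objectives. For reference, the RCG term of Eq. (2) is simply an average negative log-likelihood over the L OCR tokens, and the full pre-training loss sums the two terms. The sketch below assumes per-token logits over the OCR sequence have already been produced under the customized attention mask of Sec. 4.2 (the mask itself is not reproduced here); it is an illustration, not the authors' implementation.

```python
import torch
import torch.nn.functional as F

def rcg_loss(ocr_logits: torch.Tensor, ocr_ids: torch.Tensor) -> torch.Tensor:
    """Eq. (2): mean NLL of the OCR tokens; conditioning on only the <EOS> document token
    is enforced by the attention mask upstream, not inside this function."""
    vocab_size = ocr_logits.size(-1)
    return F.cross_entropy(ocr_logits.reshape(-1, vocab_size), ocr_ids.reshape(-1))

def pretraining_loss(rcr: torch.Tensor, rcg: torch.Tensor) -> torch.Tensor:
    """Full objective: L = L_RCR + L_RCG."""
    return rcr + rcg
```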
| Model | Retrieval (SlideVQA) | Retrieval (InfoVQA) | QA (SlideVQA) | QA (InfoVQA) |
| --- | --- | --- | --- | --- |
| VDocRAG | 77.3 | 72.9 | 44.2 | 56.2 |
| w/o MHDocVQA | 75.0 (-2.3) | 71.4 (-1.5) | 43.4 (-0.8) | 53.8 (-2.4) |
| w/o all datasets except MHDocVQA | 68.8 (-8.5) | 61.7 (-11.2) | 41.1 (-3.1) | 44.0 (-12.2) |
+ +![](images/73871c05f45431f46c97eae932f29b3326fe511a2575664d872318fe1abcd46e.jpg) +(a) Retrieval performance +Figure 5. Performance under different document lengths on InfoVQA (single-pool setting). + +![](images/df449c705825c0ab0b8705e5bf0f02771596144cbceb6c4291677a1ba17ed475.jpg) +(b) QA performance + +the DocumentVQA task, even when all models were the same initialization. Additionally, when the retrieval results were fixed to ground-truth (Gold) documents, VDocRAG demonstrated superior performance to text-based RAG. This underscores the importance of visual cues in extracting answers from documents and suggests that VDocGenerator has a higher upper-bound performance. Both text-based RAG and VDocRAG exhibited substantial improvements when provided with ground-truth documents, highlighting potential areas for enhancing retrieval accuracy and improving the generator's robustness to retrieval noise. + +# 5.4. Analysis + +Can our pre-training tasks be beneficial? Table 5 shows that VDocRetriever outperformed the model without pretraining. Removing each pre-training task or both RCG and RCR tasks decreased performance, indicating that both tasks contribute complementarily. These validate that our pre-training effectively learns to compress image features while aligning them with textual contents in images. + +Table 6. Ablation study of our dataset in retrieval and QA tasks under the single-pool setting. + +
| Model | Retrieval (OCR) | Retrieval (Encoding) | QA (Generation) | Total |
| --- | --- | --- | --- | --- |
| Text-based RAG (Phi3) | 590.0 | 70.7 | 422.7 | 1083.4 |
| VDocRAG | - | 204.4 | 789.7 | 994.1 |
+ +Table 7. Efficiency analysis on InfoVQA. The average time (ms) to encode a single document or generate a single answer is measured on a single A100 GPU. + +
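The per-document timings in Table 7 are wall-clock averages in milliseconds on a single A100. As a hedged illustration of how such numbers are typically collected (not necessarily the authors' procedure), CUDA events can be used to time an arbitrary encoding function over a batch of documents:

```python
import torch

def time_encode_ms(encode_fn, inputs, warmup=3):
    """Average wall-clock time (ms) of encode_fn per input on the current GPU."""
    for x in inputs[:warmup]:            # warm-up runs, excluded from the measurement window
        encode_fn(x)
    start = torch.cuda.Event(enable_timing=True)
    end = torch.cuda.Event(enable_timing=True)
    torch.cuda.synchronize()
    start.record()
    for x in inputs:
        encode_fn(x)
    end.record()
    torch.cuda.synchronize()
    return start.elapsed_time(end) / len(inputs)   # elapsed_time returns milliseconds
```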
| Model | Retrieval (SlideVQA) | Retrieval (InfoVQA) | QA (SlideVQA) | QA (InfoVQA) |
| --- | --- | --- | --- | --- |
| Text-based RAG (Llama3) | 60.1 | 61.8 | 37.8 | 49.5 |
| VDocRAG (Idefics3) | 73.4 | 72.5 | 48.9 | 59.9 |
| w/o Pre-train | 70.3 | 69.8 | 47.2 | 59.6 |
+ +Table 8. Analysis with different LVLM (Idefics3) in retrieval and QA tasks under the single-pool setting. + +Does LLM help understanding document images? Table 5 shows that retrieval performance dropped substantially when the LLM block was removed, leaving only the CLIP text/vision encoder, even with the same visual transformer backbone. This suggests that LLM can capture finer-grained visual details and enhance semantic understanding. + +Does our dataset improve the performance? Table 6 shows that removing MHDocVQA caused a performance decrease, indicating that MHDocVQA requires distinct reasoning skills compared to other collected datasets in OpenDocVQA. Additionally, excluding all OpenDocVQA datasets except MHDocVQA led to a significant performance drop. This confirms that our collected datasets effectively supplement the missing capabilities of LVLM in document retrieval and understanding. + +How well does VDocRAG perform under different document lengths? Figure 5 shows that VDocRAG consistently outperforms text-based RAG, indicating that VDocRAG can better understand documents through visual information. In general, we observed that the VDocRAG's relative performance over text-based RAG is larger for images with 0-10 words (+66.0 in retrieval, +21.1 in QA) than for those with 500+ words (+28.4 in retrieval, +16.7 in QA). + +Is VDocRAG more efficient than text-based RAG? Table 7 shows that VDocRAG is more efficient than text-based RAG. Especially, VDocRAG requires $69\%$ less inference time to retrieve documents than text-based RAG. Although VDocRetriever takes more time for document encoding and generation, it eliminates the time-consuming OCR processing necessary for text-based RAG. + +![](images/e6b5f824fb272dd5a3145c39a1ecf37d59a1fd8717888828b2d0f0e211e90863.jpg) +Figure 6. Qualitative results of VDocRAG compared to text-based RAG. + +![](images/957d84b5828bdd5a612e32c2651bbbda754f02c37807084e283aa9a19c30dc49.jpg) +(a) VDocRAG answers correctly, but Text-based RAG answers incorrectly + +![](images/150a4df0e8043a1b2230cf04914527d44abdde76e7a1e1f42228cdd4b0087919.jpg) +(b) VDocRAG answers incorrectly, but Text-based RAG answers correctly +Figure 7. Root causes of correct and incorrect predictions. + +Can our method apply different LVLMs? To investigate the impact of different LVLMs on VDocRAG, we replaced Phi3V with Idefics3 [29], a state-of-the-art LVLM that uses Llama3-8B [16] as its backbone LLM. As observed in Table 8, the performance trend was consistent with that of Phi3V, highlighting the versatility and broad applicability of our method. + +Qualitative results. Figure 6 illustrates the performance of our model through qualitative examples. In the top example, VDocRAG demonstrates strong performance on a question requiring multi-hop reasoning and graph understanding across multi-page slides. In the bottom example, VDocRAG also performs better on a question that requires parsing on the table with cells spanning multiple rows and columns. In contrast, text-based RAG depends solely on OCR text information, leading to a superficial understanding of the text and incorrect predictions. + +Human evaluation. To better understand the prediction differences between VDocRAG and text-based RAG, we + +manually analyzed the generated outputs by identifying the root causes of 50 correct and 50 incorrect predictions, randomly sampled from test samples. Figure 7a shows that VDocRAG significantly enhances the understanding of visual data (e.g., charts). 
Conversely, Figure 7b reveals that VDocRAG encounters challenges with text-heavy documents (e.g., books), primarily due to the OCR capabilities. We observed that text-based RAG correctly answers questions when visual data includes long titles or subtitles, which have a high textual overlap with the question. These observations are in line with the results shown in Figure 5. + +# 6. Conclusion + +We introduced a new RAG framework, VDocRAG, which can directly understand various real-world documents. We enhanced VDocRAG with two key contributions: (1) pretraining tasks capable of learning image representation efficiently by leveraging the powerful capabilities of LVLMs, and (2) OpenDocVQA, the first unified open-domain DocumentVQA dataset that encompasses a wide range of visually-rich documents. Our holistic evaluations on four datasets show that VDocRAG significantly outperformed conventional text-based RAG, shedding light on the development of an effective RAG over real-world documents. + +Limitations. While we focused on pre-training to align images and OCR data for document retrieval, leveraging caption data instead of OCR data offers the potential for retrieving images that do not contain text. Moreover, this study did not address reducing the computational cost of creating search indexes for extensive image collections. We plan to reduce the cost of VDocRAG using more efficient techniques. Lastly, joint training of QA and retrieval components simultaneously further optimizes their interactions. + +# References + +[1] Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv:2404.14219, 2024. 2, 5, 6, 3 +[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. GPT-4 technical report. arXiv:2303.08774, 2023. 1, 3 +[3] Akari Asai, Sewon Min, Zexuan Zhong, and Danqi Chen. Retrieval-based language models and applications. In ACL, pages 41-46, 2023. 2 +[4] Ali Furkan Biten, Rubén Tito, Andrés Mafla, Lluis Gómez i Bigorda, Marçal Rusinol, C. V. Jawahar, Ernest Valveny, and Dimosthenis Karatzas. Scene text visual question answering. In ICCV, pages 4290-4300, 2019. 6 +[5] Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George Bm Van Den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. Improving language models by retrieving from trillions of tokens. In ICML, pages 2206-2240, 2022. 2 +[6] Minwoo Byeon, Beomhee Park, Haecheon Kim, Sungjun Lee, Woonhyuk Baek, and Saehoon Kim. Coyo-700m: Image-text pair dataset. https://github.com/kakaobrain/coyo-dataset, 2022.3 +[7] Jingwen Chen, Yingwei Pan, Yehao Li, Ting Yao, Hongyang Chao, and Tao Mei. Retrieval augmented convolutional encoder-decoder networks for video captioning. TOMCCAP, pages 1-24, 2023. 2 +[8] Wenhu Chen, Hexiang Hu, Chitwan Sahara, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. arXiv:2209.14491, 2022. 2 +[9] Jaemin Cho, Debanjan Mahata, Ozan Irsoy, Yujie He, and Mohit Bansal. M3DocRAG: Multi-modal retrieval is what you need for multi-page multi-document understanding. arXiv:2411.04952, 2024. 2 +[10] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. 
InstructBLIP: Towards general-purpose vision-language models with instruction tuning. arXiv:2305.06500, 2023. 2 +[11] Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. FlashAttention: Fast and memory-efficient exact attention with io-awareness. In NeurIPS, pages 16344-16359, 2022. 5 +[12] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In *NAACL-HLT*, pages 4171–4186, 2019. 5, 6 +[13] Kuicai Dong, Yujing Chang, Xin Deik Goh, Dexun Li, Ruiming Tang, and Yong Liu. MMDocIR: Benchmarking multi-modal retrieval for long documents. arXiv:2501.08828, 2025. 2 +[14] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Songyang Zhang, Haodong Duan, + +Wenwei Zhang, Yining Li, et al. Internlm-xcomposer2-4khd: A pioneering large vision-language model handling resolutions from 336 pixels to 4k hd. arXiv:2404.06512, 2024. 4 +[15] Matthijs Douze, Alexandr Guzhva, Chengqi Deng, Jeff Johnson, Gergely Szilvasy, Pierre-Emmanuel Mazaré, Maria Lomeli, Lucas Hosseini, and Hervé Jégou. The faiss library. arXiv:2401.08281, 2024. 4 +[16] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv:2407.21783, 2024. 1, 8 +[17] Manuel Faysse, Hugues Sibille, Tony Wu, Gautier Vi-aud, Céline Hudelot, and Pierre Colombo. ColPali: Efficient document retrieval with vision language models. arXiv:2407.01449, 2024. 1, 2, 3, 6 +[18] Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. Retrieval augmented language model pretraining. In ICML, pages 3929-3938, 2020. 1 +[19] Matthew Honnibal and Ines Montani. spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing. To appear, 2017. 3 +[20] Anwen Hu, Haiyang Xu, Jiabo Ye, Ming Yan, Liang Zhang, Bo Zhang, Chen Li, Ji Zhang, Qin Jin, Fei Huang, et al. nplug-docowl 1.5: Unified structure learning forOCR-free document understanding. arXiv:2403.12895, 2024. 5 +[21] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. arXiv:2106.09685, 2021. 5 +[22] Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. Unsupervised dense information retrieval with contrastive learning. arXiv:2112.09118, 2021. 5, 6, 3 +[23] Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b. arXiv:2310.06825, 2023. 6 +[24] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv:2401.04088, 2024. 1, 3 +[25] Ehsan Kamalloo, Nandan Thakur, Carlos Lassance, Xueguang Ma, Jheng-Hong Yang, and Jimmy Lin. Resources for brewing heir: Reproducible reference models and an official leaderboard, 2023. 6 +[26] Yuma Koizumi, Yasunori Ohishi, Daisuke Niizumi, Daiki Takeuchi, and Masahiro Yasuda. 
Audio captioning using pre-trained large-scale language model guided by audiobased similar caption retrieval. arXiv:2012.07331, 2020. 2 +[27] Sunjun Kweon, Yeonsu Kwon, Seonhee Cho, Yohan Jo, and Edward Choi. Open-WikiTable: Dataset for open domain question answering with complex reasoning over table. In Findings of ACL, pages 8285-8297, 2023. 3, 5, 1 + +[28] Jordy Landeghem, Rubén Tito, Łukasz Borchmann, Michal Pietruszka, Paweł Józiak, Rafał Powalski, Dawid Jurkiewicz, Mickaël Coustaty, Bertrand Ackaert, Ernest Valveny, et al. Document understanding dataset and evaluation (dude). In ICCV, pages 19528-19540, 2023. 2, 3, 5, 1 +[29] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. arXiv:2408.12637, 2024. 2, 8 +[30] Chankyu Lee, Rajarshi Roy, Mengyao Xu, Jonathan Raiman, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. NvEmbed: Improved techniques for training llms as generalist embedding models. arXiv:2405.17428, 2024. 5, 6, 3 +[31] Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. In NIPS, pages 9459-9474, 2020. 1 +[32] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In ICML, pages 12888-12900, 2022. 2 +[33] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, pages 19730–19742, 2023. 2 +[34] Zehan Li, Xin Zhang, Yanzhao Zhang, Dingkun Long, Pengjun Xie, and Meishan Zhang. Towards general text embeddings with multi-stage contrastive learning. arXiv:2308.03281, 2023. 5, 6 +[35] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv:2304.08485, 2023. 2 +[36] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv:1711.05101, 2017. 5 +[37] Xueguang Ma, Sheng-Chieh Lin, Minghan Li, Wenhu Chen, and Jimmy Lin. Unifying multimodal retrieval via document screenshot embedding. arXiv:2406.11251, 2024. 1, 2, 5, 6, 3 +[38] Xueguang Ma, Shengyao Zhuang, Bevan Koopman, Guido Zuccon, Wenhu Chen, and Jimmy Lin. VISA: Retrieval augmented generation with visual source attribution. arXiv:2412.14457, 2024. 2 +[39] Seiji Maekawa, Hayate Iso, Sairam Gurajada, and Nikita Bhutani. Retrieval helps or hurts? a deeper dive into the efficacy of retrieval augmentation to language models. In NAACL, pages 5506-5521, 2024. 1, 2 +[40] Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In ACL, pages 9802-9822, 2023. 1, 2 +[41] Ahmed Masry, Xuan Long Do, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. ChartQA: A benchmark for question answering about charts with visual and logical reasoning. In Findings of ACL, pages 2263-2279, 2022. 2, 3, 5, 6, 1 +[42] Minesh Mathew, Dimosthenis Karatzas, and C. V. Jawahar. DocVQA: A dataset for vqa on document images. In WACV, pages 2200-2209, 2021. 1, 2, 5 + +[43] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and C.V. Jawahar. InfographicVQA. In WACV, pages 1697-1706, 2022. 1, 2, 3, 5 +[44] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. 
Representation learning with contrastive predictive coding. arXiv:1807.03748, 2018. 4 +[45] Md Rizwan Parvez, Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. Retrieval augmented code generation and summarization. arXiv:2108.11601, 2021. 2 +[46] Le Qi, Shangwen Lv, Hongyu Li, Jing Liu, Yu Zhang, Qiaoqiao She, Hua Wu, Haifeng Wang, and Ting Liu. DuReadervis: A Chinese dataset for open-domain document visual question answering. In Findings of ACL, pages 1338-1351, 2022. 2, 3 +[47] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 5, 6 +[48] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. JMLR, 21(140):1-67, 2020. 2 +[49] Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. Incontext retrieval-augmented language models. TACL, pages 1316-1331, 2023. 2 +[50] Rita Ramos, Desmond Elliott, and Bruno Martins. Retrievalaugmented image captioning. In EACL, pages 3666-3681, 2023. 2 +[51] Rita Ramos, Bruno Martins, Desmond Elliott, and Yova Kementchedjhieva. Smallcap: lightweight image captioning prompted with retrieval augmentation. In CVPR, pages 2840-2849, 2023. 2 +[52] Stephen Robertson, Hugo Zaragoza, et al. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389, 2009. 5, 6 +[53] Junyoung Seo, Susung Hong, Wooseok Jang, Ines Hyeonsu Kim, Minseop Kwak, Doyup Lee, and Seungryong Kim. Retrieval-augmented score distillation for text-to-3d generation. arXiv:2402.02972, 2024. 2 +[54] Ray Smith. An overview of the tesseractOCR engine. In ICDAR, pages 629-633, 2007. 5 +[55] Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv:2210.09261, 2022. 1 +[56] Ryota Tanaka, Kyosuke Nishida, and Sen Yoshida. VisualMRC: Machine reading comprehension on document images. In AAAI, pages 13878-13888, 2021. 1, 2, 3, 5 +[57] Ryota Tanaka, Kyosuke Nishida, Kosuke Nishida, Taku Hasegawa, Itsumi Saito, and Kuniko Saito. SlideVQA: A dataset for document visual question answering on multiple images. In AAAI, pages 13636-13645, 2023. 1, 2, 3, 5 + +[58] Ryota Tanaka, Taichi Iki, Kyosuke Nishida, Kuniko Saito, and Jun Suzuki. Instructdoc: A dataset for zero-shot generalization of visual document understanding with instructions. In AAAI, pages 19071-19079, 2024. 2 +[59] Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. Text embeddings by weakly-supervised contrastive pretraining. arXiv:2212.03533, 2022. 5, 6 +[60] Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. Improving text embeddings with large language models. In ACL, pages 11897-11916, 2024. 5, 6, 3 +[61] Jilan Xu, Yifei Huang, Junlin Hou, Guo Chen, Yuejie Zhang, Rui Feng, and Weidi Xie. Retrieval-augmented egocentric video captioning. In CVPR, pages 13525-13536, 2024. 2 +[62] Dongchao Yang, Songxiang Liu, Rongjie Huang, Chao Weng, and Helen Meng. 
Instructtts: Modelling expressive tts in discrete latent space with natural language style prompt. TASLP, pages 2913-2925, 2024. 2 +[63] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, Qianyu Chen, Huarong Zhou, Zhensheng Zou, Haoye Zhang, Shengding Hu, Zhi Zheng, Jie Zhou, Jie Cai, Xu Han, Guoyang Zeng, Dahai Li, Zhiyuan Liu, and Maosong Sun. Minicpm-v: A gpt-4v level mllm on your phone. arXiv:2408.01800, 2024. 6 +[64] Michihiro Yasunaga, Armen Aghajanyan, Weijia Shi, Rich James, Jure Leskovec, Percy Liang, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. Retrieval-augmented multimodal language modeling. In ICML, pages 39755-39769, 2023. 2 +[65] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Guohai Xu, Chenliang Li, Junfeng Tian, Qi Qian, Ji Zhang, Qin Jin, Liang He, Xin Lin, and Fei Huang. UReader: Universal OCR-free visually-situated language understanding with multimodal large language model. In EMNLP Findings, pages 2841-2858, 2023. 4 +[66] Shi Yu, Chaoyue Tang, Bokai Xu, Junbo Cui, Junhao Ran, Yukun Yan, Zhenghao Liu, Shuo Wang, Xu Han, Zhiyuan Liu, et al. VisRAG: Vision-based retrieval-augmented generation on multi-modality documents. arXiv:2410.10594, 2024. 2, 5, 6 +[67] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. In ICCV, pages 11975-11986, 2023. 2 +[68] Liang Zhang, Anwen Hu, Jing Zhang, Shuo Hu, and Qin Jin. MPMQA: multimodal question answering on product manuals. In AAAI, pages 13958-13966, 2023. 2, 3, 5, 1 +[69] Mingyuan Zhang, Xinying Guo, Liang Pan, Zhongang Cai, Fangzhou Hong, Huirong Li, Lei Yang, and Ziwei Liu. Remodiffuse: Retrieval-augmented motion diffusion model. In ICCV, pages 364-373, 2023. 2 +[70] Shuyan Zhou, Uri Alon, Frank F Xu, Zhiruo Wang, Zhengbao Jiang, and Graham Neubig. Docprompting: Generating code by retrieving the docs. arXiv:2207.05987, 2022. 2 +[71] Fengbin Zhu, Wenqiang Lei, Fuli Feng, Chao Wang, Haozhou Zhang, and Tat-Seng Chua. Towards complex doc + +ument understanding by discrete reasoning. In ACMM, pages 4857-4866, 2022. 2 + +# Supplementary Material + +
| Statistics | Number |
| --- | --- |
| Total Images | 206,267 |
| Total Questions | 43,474 |
| - Single-Hop Questions | 33,244 (76.5%) |
| - Multi-Hop Questions | 10,230 (23.5%) |
| - Extractive Answer | 19,797 (45.5%) |
| - Abstractive Answer | 23,677 (54.5%) |
| QA Source Datasets | 9 |
| - Existing DocumentVQA Datasets | 7 |
| - Existing TableQA Datasets | 1 |
| - Our Newly Created Datasets | 1 |
| Maximum Question Length | 58 |
| Maximum Answer Length | 130 |
| Average Question Length | 13.7 |
| Average Answer Length | 3.7 |
+ +Table A. Main statistics in OpenDocVQA. + +![](images/7e49aaf1f1323c641e43f8db00064972159c2186eda489691bd009dc1116d050.jpg) + +![](images/f73482c9c890084652a720744d7d93804bedd1871b4d6c21d20525e9e5aba77b.jpg) +(a) Word cloud of questions. +(b) Word cloud of answers. + +# A. OpenDocVQA Details + +Dataset Statistics. The main statistics of OpenDocVQA are presented in Table A. There are two types of questions: single-hop (45.5%) and multi-hop (23.5%). Answers to questions are categorized as extractive (45.5%) and abstractive (54.5%) types. OpenDocVQA consists of nine open-domain DocumentVQA datasets, including a newly created MHDocVQA dataset to address multi-hop questions over multiple documents, and collected and filtered QA datasets as follows. + +- DocVQA [42] includes industry document images collected from the UCSF Industry Document Library. +- InfoVQA [43] includes infographics downloaded from the Internet for the search query "infographics". +- VisualMRC [56] is a visual machine reading comprehension on webpage screenshot images. +ChartQA [41] is a chart understanding dataset with human-written and machine-generated questions focusing on visual and logical reasoning. +- OpenWikiTable [27] is an open-domain question answering over tables. We took screenshot images of the tables, converting them into images with complex text layouts to handle visually-rich table data. +- DUDE [28] is a multi-page, multi-domain, and multi-industry QA dataset that requires processing long documents and understanding different types of documents. +- MPMQA [68] requires comprehending multimodal content in an entire product manual and answering questions. + +![](images/53209fd369b7adc5051e928ef74d2ca73a849e33e79fbfde065f67f13f855ff2.jpg) +Figure A. Word cloud distributions of question and answer texts. +Figure B. Distribution of first three words of the question. + +- SlideVQA [57] requires multi-hop reasoning over multiple slide images containing various text formats, layouts, and visual content such as plots and charts. + +Figure A presents word clouds of the most frequently appeared words in the question and answer texts, illustrating that OpenDocVQA covers a wide range of topics and words. This observation is further supported by Figure B, which is a sunburst of the first three words of the questions. + +Filtering DocumentVQA datasets. We applied the following five heuristic rules to automatically filter out likely + +Multi-hop Question Generation Prompt +```txt +EXAMPLE1: +question1: In which country is the GWP smallest? +answer1: Denmark +question2: What is the staple diet of Denmark? +answer2: Fish, cheese +combined question: What is the staple diet of the country where the GWP is the smallest? +EXAMPLE2: +question1: To which League does Chicago Cubs belong? +answer1: MLB +question2: What is the average MLB team value? +answer2: $1.5b +combined question: What is the average the league where Chicago Cubs belongs to team value? +EXAMPLE3 +question1: Which is the capital city of Germany? +answer1: Berlin +question2: What year did Berlin host the OKFestival? +answer2: It's 2014. +combined question: What year did the capital city of Germany host the OKFestival? +Based on the above 3 examples, provide a combined question for the following case, such that the answer to the combined question is the same as the answer2: +question1: {single-hop question} +answer1: {single-hop answer} +question2: {single-hop question} +answer2: {single-hop answer} +combined question: +``` + +Table B. 
Multi-hop question generation prompt. “{single-hop question}” and “{single-hop answer}” are placeholders of two single-hop questions. +Multi-hop Question Filtering Prompt +Table C. Multi-hop question filtering prompt. “{single-hop question}” and “{single-hop answer}” are placeholders of two single-hop questions. “{multi-hop question}” denotes the generated multi-hop questions. +```txt +question1: {single-hop question} +answer1: {single-hop answer} +question2: {single-hop question} +answer2: {single-hop answer} +Based on the questions and answers above, please answer the following question shortly. If the answer is not identified, the answer is 'None': {multi-hop question} +``` + +context-dependent questions: + +- The question has one or more demonstrative pronouns, including "this", "these", and "those". +- The question has one or more personal pronouns, including "she", "he", "her", "his", and "him". +- The question has one or more specific keywords, including "the document" and "mention". +- The question does not contain entities except for numbers. +- The question is shorter than six words. + +Any samples matching at least one of these rules were removed from our dataset. After applying the rules, we + +manually reviewed all the questions to ensure context-independence, guided by the instruction: "When you see the question without a given document, can you find a unique document in the corpus to provide a unique answer?" To validate our review, we randomly sampled 50 questions with their gold and top-5 retrieved documents (from VDocRetriever) and found no ambiguous cases, confirming the high quality of our process. + +Prompts for creating multi-hop questions. Table B shows the prompt for combining two single-hop questions + +
| Dataset | Task Description |
| --- | --- |
| DocVQA | You have to find an industry document that answers my question. |
| InfoVQA | Given a question, retrieve an infographic to answer the question. |
| VisualMRC | I'm looking for a screenshot image that answers the question. |
| ChartQA | Given a user query, retrieve a chart image that answers the query. |
| OpenWikiTable | Given a user query, retrieve a table image for answering the question. |
| DUDE | You need to retrieve evidence from a PDF page to address the question. |
| MPMQA | I want to know the answer to the question. Can you find evidence from manual pages? |
| SlideVQA | Given a question, retrieve a slide image to answer the question. |
| MHDocVQA | Given a multihop-question, retrieve multiple pages that can help answer the question. |
+ +Table D. Instructions in the visual document retrieval task. + +
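The task descriptions in Table D are inserted into the instruction template described in Appendix B ("Instruct: {task description} \n Query: {question}"). A minimal formatting helper is sketched below; the dictionary simply mirrors one Table D entry and the function name is ours, so this is illustrative rather than released code.

```python
TASK_DESCRIPTIONS = {
    # Mirrors Table D (one entry shown; the remaining datasets follow the same pattern).
    "ChartQA": "Given a user query, retrieve a chart image that answers the query.",
}

def build_retrieval_query(dataset: str, question: str) -> str:
    """Apply the LLM-retriever instruction template from Appendix B to a raw question."""
    return f"Instruct: {TASK_DESCRIPTIONS[dataset]} \n Query: {question}"

print(build_retrieval_query("ChartQA", "Which year has the highest revenue?"))
```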
| Model | Model Checkpoint |
| --- | --- |
| Contriever | facebook/contriever-msmarco |
| E5 | intfloat/e5-base-v2 |
| GTE | thenlper/gte-base |
| E5-Mistral | intfloat/e5-mistral-7b-instruct |
| NV-Embed-v2 | nvidia/NV-Embed-v2 |
| CLIP | openai/clip-vit-large-patch14-336 |
| DSE | Tevatron/dse-phi3-docmatix-v1 |
| VisRAG-Ret | openbmb/VisRAG-Ret |
| Phi3V | microsoft/Phi-3-vision-128k-instruct |
| Idefics3 | HuggingFaceM4/Idefics3-8B-Llama3 |
+ +Table E. Model checkpoints stored on HuggingFace. + +
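As an illustration of how the Phi3V backbone listed in Table E is typically obtained from HuggingFace, a standard transformers loading snippet is shown below; this is an assumed sketch of common usage, not the authors' training entry point.

```python
from transformers import AutoModelForCausalLM, AutoProcessor

model_id = "microsoft/Phi-3-vision-128k-instruct"  # backbone checkpoint from Table E
processor = AutoProcessor.from_pretrained(model_id, trust_remote_code=True)
model = AutoModelForCausalLM.from_pretrained(
    model_id,
    trust_remote_code=True,   # Phi-3-vision ships custom modeling code
    torch_dtype="auto",
)
```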
| Hyperparameters | Value |
| --- | --- |
| Learning Rate | 1e-4 |
| Gradient Accumulation | 4 |
| AdamW β1 | 0.9 |
| AdamW β2 | 0.999 |
| LoRA Attention Dimension r | 8 |
| LoRA Scaling Alpha | 64 |
| LoRA Dropout | 0.1 |
| LoRA Target | *.proj |
| BF16 | True |
+ +to generate multi-hop questions. Moreover, Table C shows the prompt for filtering the generated multi-hop questions. + +# B. Experimental Details + +Instruction templates. Following a standard LLM-based retrieval training and evaluation strategy [60], we applied natural language instruction templates to the original question for the visual document retrieval task: + +Instruct: {task description} \n Query: {question}, + +where “{task description}” is a placeholder for a one-sentence task description as shown in Table D. Note that the instruction format was applied to only LLM-based retrievers, including E5-Mistral [60], NV-Embed-v2 [30], + +Table F. Hyperparameters used for pre-training and fine-tuning. + +
| Max Image Resolution | Retrieval (nDCG@5) | Retrieval (Encoding Time) | QA (ANLS) | QA (Generation Time) |
| --- | --- | --- | --- | --- |
| 336×336 | 28.7 | 85.0 | 37.2 | 394.5 |
| 672×672 | 72.8 | 106.4 | 42.7 | 490.9 |
| 1344×1344 | 72.9 | 204.4 | 56.2 | 789.7 |
+ +Table G. Impact of image resolution on InfoVQA under the single-pool setting. Average time (ms) to encode a single document or generate a single answer is measured on a single A100 GPU. + +DSE [37], Phi3 [1], and VDocRetriever. Our preliminary experiments observed that using the instruction during both training and evaluation improved the performance of LLM-based retrievers. However, applying the same instruction format to non-LLM-based retrievers, such as Contriever [22], resulted in a performance decline due to lacking instruction-following capabilities. Furthermore, we appended an instruction regarding the desired output format for the DocumentVQA task: + +$\backslash$ n Answer briefly. + +Model checkpoints Table E shows model initialization checkpoints stored on HuggingFace1. + +Model hyperparameters Table F lists hyperparameters in pre-training and fine-tuning used for our models. + +# C. Additional Experimental Analysis + +How does image resolution impact performance? Table G shows that increasing image resolution improved the model's capability to understand and encode the document; however, it also significantly increased the inference time for both retrieval and QA tasks. Moreover, the performance in the QA task exhibited greater sensitivity to image resolution compared to the retrieval task, indicating that the QA task demands more detailed visual understanding. + +![](images/c3e1f531e3d74c264ced8dd2963260bf24946c515cb9e7bea31a89d5669a150c.jpg) +Figure C. QA performance with various top-k on InfoVQA under the single-pool setting. () denotes document sources. + +How many retrieved documents to augment? Figure C shows that incorporating three documents yielded the best results in VDocRAG. While adding a few documents may include helpful contexts, adding more low-ranked or randomly sampled documents introduces noise and deteriorates generation due to the imperfections of retrievers. + +Additional qualitative results. Figure D shows qualitative results of VDocRAG compared to text-based RAG. VDocRAG demonstrates significant performance advantages in understanding layouts and visual content, such as tables, charts, figures, and diagrams. These findings highlight the critical role of representing documents as images to improve the performance of the RAG framework. + +# VDocRetriever + +How many apps does the company which makes Clash of Clans make? + +Ground-truth: 7 + +Text-based RAG: 61 + +![](images/f896e0eb27f884677490aa86ef18df4953d23755fa8e44a9ccdc08bc604d0195.jpg) + +VDocRAG: 7 + +![](images/8fda62ac62030b06a549a2ac78a33236c21b558754f50098b167c4b3d99054c7.jpg) + +![](images/02e19ae4e950e2108d7311580eeef92b303a0b15c955b13f5bb0d1de7673722c.jpg) +Top1 + +![](images/fd6d7dd7029c1a3057e13b3b9406c6fc9423887c3cc2e59a07e224f78adf5288.jpg) +Top2 + +# Text-based Retriever + +Top1 + +![](images/4b0b054b065675e8636c73ddf038537162d7f7281b521ad2886aeb9652aafcca.jpg) + +![](images/b0bd7c2f12f9f023e7bf5933be336cfa80c05bb664a078dd8f3292b12b05d72a.jpg) + +Top2 + +![](images/10b565d1b28db9a28db2f8edc5bebe2760ff7ad373804320019c8e37cb788e11.jpg) + +Top Free iOS App Earners + +![](images/60d6ea6ed65da4afd72cffe5fb60f023b4f9af87271ab1f9836404bcc380b616.jpg) + +What is the Stream Source for the API which uses Java, Scala, and Python? 
Additional qualitative results. Figure D shows qualitative results of VDocRAG compared to text-based RAG. VDocRAG demonstrates significant performance advantages in understanding layouts and visual content, such as tables, charts, figures, and diagrams. These findings highlight the critical role of representing documents as images in improving the performance of the RAG framework.

Figure D. Additional qualitative results of VDocRAG compared to Text-based RAG. For each question, the figure shows the top-2 documents retrieved by VDocRetriever and by a text-based retriever, together with the generated answers. Examples include: "How many apps does the company which makes Clash of Clans make?" (ground truth: 7; text-based RAG: 61; VDocRAG: 7); "What is the Stream Source for the API which uses Java, Scala, and Python?" (ground truth: HDFS, Network; text-based RAG: Fink; VDocRAG: HDFS, Network); "Which is Microsoft's biggest acquisition to date?" (ground truth: Skype; text-based RAG: Oculus; VDocRAG: Skype); "How many layers are used in the gloves for the DPE suit?" (ground truth: Three; text-based RAG: Two; VDocRAG: Three); and "What is the phase before full moon?" (ground truth: Waxing Gibbous; text-based RAG: New Mod; VDocRAG: Waxing Gibbous).
+ +![](images/bc117a74a492d448f6907f4399296864b7122b25adc16f67e341ba2813f33b24.jpg) +Top2 \ No newline at end of file diff --git a/data/2025/2504_09xxx/2504.09795/images/00e4521b82c8ff62a3115b30dc25ed033f6dacd64ce4acc20b5c55599e537218.jpg b/data/2025/2504_09xxx/2504.09795/images/00e4521b82c8ff62a3115b30dc25ed033f6dacd64ce4acc20b5c55599e537218.jpg new file mode 100644 index 0000000000000000000000000000000000000000..57b08cb6d69a11c68670ca27c0e09d56ec978e0f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/00e4521b82c8ff62a3115b30dc25ed033f6dacd64ce4acc20b5c55599e537218.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:15785806f2ffe88844a572787b1373153f0248d0056086080142d6897ed354b1 +size 33734 diff --git a/data/2025/2504_09xxx/2504.09795/images/02e19ae4e950e2108d7311580eeef92b303a0b15c955b13f5bb0d1de7673722c.jpg b/data/2025/2504_09xxx/2504.09795/images/02e19ae4e950e2108d7311580eeef92b303a0b15c955b13f5bb0d1de7673722c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..72a0329bcabd8bc4718f7ef8974af823143b68fa --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/02e19ae4e950e2108d7311580eeef92b303a0b15c955b13f5bb0d1de7673722c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:21ebfc1b93fccbf6e02f5eb7102a10bee063e7b89701fcb07b56c3cb282bd3df +size 8662 diff --git a/data/2025/2504_09xxx/2504.09795/images/0489f0c8f2a8e8055c63cf3cf762948906913c7dd5adb78e0b699b9d12492872.jpg b/data/2025/2504_09xxx/2504.09795/images/0489f0c8f2a8e8055c63cf3cf762948906913c7dd5adb78e0b699b9d12492872.jpg new file mode 100644 index 0000000000000000000000000000000000000000..cd6a4e79124783a5e4c13c9434e4a6b1dee574e5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/0489f0c8f2a8e8055c63cf3cf762948906913c7dd5adb78e0b699b9d12492872.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:912410efa599a43940b0d294074f03255dce91d77f5878650697bdb7342f60bb +size 8059 diff --git a/data/2025/2504_09xxx/2504.09795/images/07854eb43642dc45afdf6f77025fe8200fed503d7127377a87544536c2e088d9.jpg b/data/2025/2504_09xxx/2504.09795/images/07854eb43642dc45afdf6f77025fe8200fed503d7127377a87544536c2e088d9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c8151b2c8704a351c393be6ccac0888d1a1fe3c2 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/07854eb43642dc45afdf6f77025fe8200fed503d7127377a87544536c2e088d9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5fb04b3c62dda2defcb5cfa3938e557d95a3680b419a34ee80f2c8592cb28bc1 +size 7875 diff --git a/data/2025/2504_09xxx/2504.09795/images/0b86f9b43a10325329ad373f38c304d5f6d962aa2c5f295b4d7af1e06b4229c9.jpg b/data/2025/2504_09xxx/2504.09795/images/0b86f9b43a10325329ad373f38c304d5f6d962aa2c5f295b4d7af1e06b4229c9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..eb0d83e946417dcc01aec6c1c5fa787cb5f4797d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/0b86f9b43a10325329ad373f38c304d5f6d962aa2c5f295b4d7af1e06b4229c9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9fa949b3bb9245772565fe545ce4a917acff500053eaf7c45f0ddf51a1fc53c8 +size 7021 diff --git a/data/2025/2504_09xxx/2504.09795/images/10b565d1b28db9a28db2f8edc5bebe2760ff7ad373804320019c8e37cb788e11.jpg b/data/2025/2504_09xxx/2504.09795/images/10b565d1b28db9a28db2f8edc5bebe2760ff7ad373804320019c8e37cb788e11.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e239d1440bb2abab409882167a82d7af86bc3596 --- 
/dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/10b565d1b28db9a28db2f8edc5bebe2760ff7ad373804320019c8e37cb788e11.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:80ad9a1540abb9ecd330be129d0f0aba24846f36b2c1e82718f813eced07fe95 +size 1007 diff --git a/data/2025/2504_09xxx/2504.09795/images/113d666e0d4830fb90f365048e0bf49664e7fea3e1aae273b56278576ea5f2f6.jpg b/data/2025/2504_09xxx/2504.09795/images/113d666e0d4830fb90f365048e0bf49664e7fea3e1aae273b56278576ea5f2f6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8d3300d8076547405c3e0539678f38e63f06581e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/113d666e0d4830fb90f365048e0bf49664e7fea3e1aae273b56278576ea5f2f6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:2206d5ff64ad0ea4777251f205b0762c502e3edfcb1bc2dc9e05f52619689831 +size 8992 diff --git a/data/2025/2504_09xxx/2504.09795/images/150a4df0e8043a1b2230cf04914527d44abdde76e7a1e1f42228cdd4b0087919.jpg b/data/2025/2504_09xxx/2504.09795/images/150a4df0e8043a1b2230cf04914527d44abdde76e7a1e1f42228cdd4b0087919.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2a0f23014ac9b79bd5f128111fa93a7f64630274 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/150a4df0e8043a1b2230cf04914527d44abdde76e7a1e1f42228cdd4b0087919.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b6547f270b1988f012bc3cf4be99f86e02022be7daeab77631f4b41dfb94ddcb +size 6321 diff --git a/data/2025/2504_09xxx/2504.09795/images/1a194f91f9eb8affa85635e923392a2b9e31bb226c9ea35d790ff450f915ec84.jpg b/data/2025/2504_09xxx/2504.09795/images/1a194f91f9eb8affa85635e923392a2b9e31bb226c9ea35d790ff450f915ec84.jpg new file mode 100644 index 0000000000000000000000000000000000000000..943c0c588911656d46717a4fe47c68faf68e4d22 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/1a194f91f9eb8affa85635e923392a2b9e31bb226c9ea35d790ff450f915ec84.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bdf024b04157f19c974452a17de1d037c0200416ff3dab4ffbef3adfd9bc3011 +size 45509 diff --git a/data/2025/2504_09xxx/2504.09795/images/1f97e6f0def4cec4b8ceb886743b573b37cc841d1acb17b683d13dbe3cf287ab.jpg b/data/2025/2504_09xxx/2504.09795/images/1f97e6f0def4cec4b8ceb886743b573b37cc841d1acb17b683d13dbe3cf287ab.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a87fcbf4e7092876306f64571ae12bc58402664b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/1f97e6f0def4cec4b8ceb886743b573b37cc841d1acb17b683d13dbe3cf287ab.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:86a97d83a8b5e7b8f520403800ff7ca154bcd00605708137db5135d97ebd09fe +size 6333 diff --git a/data/2025/2504_09xxx/2504.09795/images/2c78af87a7f5b90dd0fafe9014a23fa580f89c137511a187dc4e21ee04dd4a3a.jpg b/data/2025/2504_09xxx/2504.09795/images/2c78af87a7f5b90dd0fafe9014a23fa580f89c137511a187dc4e21ee04dd4a3a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..aef074883bc64f458a2ebad0d90f0b018ac2c16b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/2c78af87a7f5b90dd0fafe9014a23fa580f89c137511a187dc4e21ee04dd4a3a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:d862962dd1864c7b0f1ea05450e9da55833385de8b4caa376cdd72f3db1d069f +size 25689 diff --git a/data/2025/2504_09xxx/2504.09795/images/36edfad1b65572802a919a3b8ea162fe7b27fd4468e7ccac548538547b9904d2.jpg 
b/data/2025/2504_09xxx/2504.09795/images/36edfad1b65572802a919a3b8ea162fe7b27fd4468e7ccac548538547b9904d2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..988790649e177cc8689c0bd1b8232b7c77e747bb --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/36edfad1b65572802a919a3b8ea162fe7b27fd4468e7ccac548538547b9904d2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4de1e9a86e4ae9615f0bbe98163032dc1229ceda9900351793948866691f25ea +size 1005 diff --git a/data/2025/2504_09xxx/2504.09795/images/38ac7060dba69efc014f7182af952de71ee03e6a3f3ae00d2e81ed36c512e0ad.jpg b/data/2025/2504_09xxx/2504.09795/images/38ac7060dba69efc014f7182af952de71ee03e6a3f3ae00d2e81ed36c512e0ad.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1527135a296ca9a2d96ef9ecec535e75814c3f98 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/38ac7060dba69efc014f7182af952de71ee03e6a3f3ae00d2e81ed36c512e0ad.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba2a8ff74257ac641e16c1a1f3a13987447ab46483bb61f6e6e24f081bf79e83 +size 10549 diff --git a/data/2025/2504_09xxx/2504.09795/images/3db7ae0cbb6aa7ec0887e99eca957ba1d4f7a591fab2c412bd04ff5d7c27e484.jpg b/data/2025/2504_09xxx/2504.09795/images/3db7ae0cbb6aa7ec0887e99eca957ba1d4f7a591fab2c412bd04ff5d7c27e484.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b31dbd40867d970cbec5e084909be5be1d54040c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/3db7ae0cbb6aa7ec0887e99eca957ba1d4f7a591fab2c412bd04ff5d7c27e484.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:04b0a2736590d3be7566b614d214633764e9c2cc312cbfd0936eeb671988a293 +size 5631 diff --git a/data/2025/2504_09xxx/2504.09795/images/44bd8baf3943678355dc4b467e0811d9b27915bb0fc9814308e0a335c81d698f.jpg b/data/2025/2504_09xxx/2504.09795/images/44bd8baf3943678355dc4b467e0811d9b27915bb0fc9814308e0a335c81d698f.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8f7024e9e095695611fc7c9b4dcceb6e569c337d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/44bd8baf3943678355dc4b467e0811d9b27915bb0fc9814308e0a335c81d698f.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:1642471c486a417fe225a81de3111a0b679c81c2ac9bc9b0b6d4222fb3ae1bbb +size 81260 diff --git a/data/2025/2504_09xxx/2504.09795/images/46384f36ba5e958631e5a95913c6a969f4434967e9130b3df130d48bf095fd9e.jpg b/data/2025/2504_09xxx/2504.09795/images/46384f36ba5e958631e5a95913c6a969f4434967e9130b3df130d48bf095fd9e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d9127b10ec2857979b10f2325335d745cb436b5b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/46384f36ba5e958631e5a95913c6a969f4434967e9130b3df130d48bf095fd9e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:51ba57bcca74c8f3b4ae70f72bd55eeda6fd89a49cdf212ae490269e0685d718 +size 13850 diff --git a/data/2025/2504_09xxx/2504.09795/images/4b0b054b065675e8636c73ddf038537162d7f7281b521ad2886aeb9652aafcca.jpg b/data/2025/2504_09xxx/2504.09795/images/4b0b054b065675e8636c73ddf038537162d7f7281b521ad2886aeb9652aafcca.jpg new file mode 100644 index 0000000000000000000000000000000000000000..9f33fd54a667307253f4ca6b18655710b813436e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/4b0b054b065675e8636c73ddf038537162d7f7281b521ad2886aeb9652aafcca.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:d00430239fad07c3587c6cca593487a4bc8d1b08efa2e0375bc6be382abd0ed5 +size 957 diff --git a/data/2025/2504_09xxx/2504.09795/images/4e61e1c195bf6000753a2dd640be8401758f0293e8d421f6f33ad6103d78a7f0.jpg b/data/2025/2504_09xxx/2504.09795/images/4e61e1c195bf6000753a2dd640be8401758f0293e8d421f6f33ad6103d78a7f0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..abd788f4c279410c54b0e4ed678c1341bc2a044f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/4e61e1c195bf6000753a2dd640be8401758f0293e8d421f6f33ad6103d78a7f0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:32e8aef6a618760a8b6ecf531994cbf0380330e510d468d8da42238e1dba77d4 +size 25284 diff --git a/data/2025/2504_09xxx/2504.09795/images/5090daed0843b128ed56ee5afc1f886125beab326b643bf72d7df2aaafa12051.jpg b/data/2025/2504_09xxx/2504.09795/images/5090daed0843b128ed56ee5afc1f886125beab326b643bf72d7df2aaafa12051.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b11b6630b44382df5061c08b40a7f14fde8756e6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/5090daed0843b128ed56ee5afc1f886125beab326b643bf72d7df2aaafa12051.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:4ed9a03d7cc179ab5c3196c1a28030cb6d0e9b2f836a02ae7b26b45f5791b10b +size 21137 diff --git a/data/2025/2504_09xxx/2504.09795/images/53209fd369b7adc5051e928ef74d2ca73a849e33e79fbfde065f67f13f855ff2.jpg b/data/2025/2504_09xxx/2504.09795/images/53209fd369b7adc5051e928ef74d2ca73a849e33e79fbfde065f67f13f855ff2.jpg new file mode 100644 index 0000000000000000000000000000000000000000..30284093a9f43c56bc8c9a2520e5b57abdbbb89c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/53209fd369b7adc5051e928ef74d2ca73a849e33e79fbfde065f67f13f855ff2.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5435cd11eacb51df5cac4c785240cba41572613f3d20d782110b1d00ceb78b31 +size 24121 diff --git a/data/2025/2504_09xxx/2504.09795/images/54efb87ac6dad28683019a443b5e882e7f23fa45ba4fe6d62ae166f575a26382.jpg b/data/2025/2504_09xxx/2504.09795/images/54efb87ac6dad28683019a443b5e882e7f23fa45ba4fe6d62ae166f575a26382.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e9fdad2acd5e2f9f5a2577ddbb23a62258a933d6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/54efb87ac6dad28683019a443b5e882e7f23fa45ba4fe6d62ae166f575a26382.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afae06d70a099fc2a8d911cc9f2065ab0c54827aafdad3cbbf0a1eeeef846d51 +size 59731 diff --git a/data/2025/2504_09xxx/2504.09795/images/5f6a79669dc7ecbbfe97764c217ad7216d08cf0faab4ea84eb343c07e9058ad6.jpg b/data/2025/2504_09xxx/2504.09795/images/5f6a79669dc7ecbbfe97764c217ad7216d08cf0faab4ea84eb343c07e9058ad6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6dee7ca23bf7076fd2f22eb7c90a53519719caf6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/5f6a79669dc7ecbbfe97764c217ad7216d08cf0faab4ea84eb343c07e9058ad6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:55a2d0d9b8c09fdf50a5a40b3fd202377ac00b947bc3b4f69dd2945cd558f302 +size 1353 diff --git a/data/2025/2504_09xxx/2504.09795/images/60d6ea6ed65da4afd72cffe5fb60f023b4f9af87271ab1f9836404bcc380b616.jpg b/data/2025/2504_09xxx/2504.09795/images/60d6ea6ed65da4afd72cffe5fb60f023b4f9af87271ab1f9836404bcc380b616.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1e431cfa48aad43713ebdc895bb60ca9f3d0c8ef --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09795/images/60d6ea6ed65da4afd72cffe5fb60f023b4f9af87271ab1f9836404bcc380b616.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:44c03acf35e367721b9d1fa205ae974a05770c61db1f6572afe7faa4b4fc286d +size 5152 diff --git a/data/2025/2504_09xxx/2504.09795/images/60fe3beaaf4572fd3ce80a4927146705e8c482da349ae6b853c10beb2ba62fce.jpg b/data/2025/2504_09xxx/2504.09795/images/60fe3beaaf4572fd3ce80a4927146705e8c482da349ae6b853c10beb2ba62fce.jpg new file mode 100644 index 0000000000000000000000000000000000000000..6fa8e7cb8d59c2b2c6d8f19452c5065ead26f759 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/60fe3beaaf4572fd3ce80a4927146705e8c482da349ae6b853c10beb2ba62fce.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7e2734990485ff0b23cee8015b084ec132d958bf0edafc9dc9311616604973e +size 7068 diff --git a/data/2025/2504_09xxx/2504.09795/images/621d716df05ee2f430077f5046584325bfcf604ee584d3e4693b631080d1dc7e.jpg b/data/2025/2504_09xxx/2504.09795/images/621d716df05ee2f430077f5046584325bfcf604ee584d3e4693b631080d1dc7e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..bcffddf22c8408da8de0da882c72c588c11f101c --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/621d716df05ee2f430077f5046584325bfcf604ee584d3e4693b631080d1dc7e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ba4e97ed4357d84aefe3a32ac8df35621c28d8eeb65779936d8ab91e2fe2b730 +size 34501 diff --git a/data/2025/2504_09xxx/2504.09795/images/73871c05f45431f46c97eae932f29b3326fe511a2575664d872318fe1abcd46e.jpg b/data/2025/2504_09xxx/2504.09795/images/73871c05f45431f46c97eae932f29b3326fe511a2575664d872318fe1abcd46e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4371d92ce7a2b717375a175d4f7c0292f9de013a --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/73871c05f45431f46c97eae932f29b3326fe511a2575664d872318fe1abcd46e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1d35f6860c1497eb0a3b3f1d96f082923d7eb5316353f6b8350712adb7200fb +size 12709 diff --git a/data/2025/2504_09xxx/2504.09795/images/7c3dc62a709a7e1b74f98d1eda8a376a4f7c332b6cdeb2a3d34e613e614345b9.jpg b/data/2025/2504_09xxx/2504.09795/images/7c3dc62a709a7e1b74f98d1eda8a376a4f7c332b6cdeb2a3d34e613e614345b9.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c712a8b03ed71fdeb4991d768b4015798465e0f1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/7c3dc62a709a7e1b74f98d1eda8a376a4f7c332b6cdeb2a3d34e613e614345b9.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:24e01b974310c741da79353420be18888cf4dc59229e1d0cf29f256e4c4d229d +size 10946 diff --git a/data/2025/2504_09xxx/2504.09795/images/7e49aaf1f1323c641e43f8db00064972159c2186eda489691bd009dc1116d050.jpg b/data/2025/2504_09xxx/2504.09795/images/7e49aaf1f1323c641e43f8db00064972159c2186eda489691bd009dc1116d050.jpg new file mode 100644 index 0000000000000000000000000000000000000000..1449249ae39047b18704eb7d85ad8a198d5b4081 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/7e49aaf1f1323c641e43f8db00064972159c2186eda489691bd009dc1116d050.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c299308c47efb0bf0c6e3e342002af865b9fbfb23f9259a48a237f9ece1437e7 +size 39638 diff --git a/data/2025/2504_09xxx/2504.09795/images/8bf73301f72ebf4656f82cc481c15fbde6f61a8740c0b33b1c37027069fd9381.jpg 
b/data/2025/2504_09xxx/2504.09795/images/8bf73301f72ebf4656f82cc481c15fbde6f61a8740c0b33b1c37027069fd9381.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7d0760473098ad54c50c8e306f88324e1aa86c25 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/8bf73301f72ebf4656f82cc481c15fbde6f61a8740c0b33b1c37027069fd9381.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e9e55007f0fc425fff11ddd3e7232770698f293d1148830daf503ed7e6ed34ae +size 9380 diff --git a/data/2025/2504_09xxx/2504.09795/images/8fd4c6619a027e8b10e7f28376bbaad6aa53a02853b4a8d1c27d64b6a014d0e8.jpg b/data/2025/2504_09xxx/2504.09795/images/8fd4c6619a027e8b10e7f28376bbaad6aa53a02853b4a8d1c27d64b6a014d0e8.jpg new file mode 100644 index 0000000000000000000000000000000000000000..86566d620dddf2a699f13a21dc8550a39e581441 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/8fd4c6619a027e8b10e7f28376bbaad6aa53a02853b4a8d1c27d64b6a014d0e8.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:7da7610fd1ef33b85230b8b84e260a6910ee245277c6e92192bf17aaf0a46ba5 +size 13233 diff --git a/data/2025/2504_09xxx/2504.09795/images/8fda62ac62030b06a549a2ac78a33236c21b558754f50098b167c4b3d99054c7.jpg b/data/2025/2504_09xxx/2504.09795/images/8fda62ac62030b06a549a2ac78a33236c21b558754f50098b167c4b3d99054c7.jpg new file mode 100644 index 0000000000000000000000000000000000000000..35338e25b4c34cc8fef25991a470f1a9bb27dd38 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/8fda62ac62030b06a549a2ac78a33236c21b558754f50098b167c4b3d99054c7.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:94ea73d252913cc671bfec3c7e88ddc46caf36036dc6f5f32d5d701bd4e97140 +size 878 diff --git a/data/2025/2504_09xxx/2504.09795/images/9092ea0c400936ff15a858f0a195e9d0777cd251401800d6667b66fc6f19e152.jpg b/data/2025/2504_09xxx/2504.09795/images/9092ea0c400936ff15a858f0a195e9d0777cd251401800d6667b66fc6f19e152.jpg new file mode 100644 index 0000000000000000000000000000000000000000..c6a418607b8663b32c7fecf9faf6649c11796c88 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/9092ea0c400936ff15a858f0a195e9d0777cd251401800d6667b66fc6f19e152.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:ff4526f186b6082ce84fe48026292abe0b229f7fa6930f6b4e360b57e309aa6f +size 914 diff --git a/data/2025/2504_09xxx/2504.09795/images/92b6df9e1b061c1adc5993c1f56e1cf0ed8cef4a78b71fab5fe1c20b06c29570.jpg b/data/2025/2504_09xxx/2504.09795/images/92b6df9e1b061c1adc5993c1f56e1cf0ed8cef4a78b71fab5fe1c20b06c29570.jpg new file mode 100644 index 0000000000000000000000000000000000000000..25c4ca039d36bab9a63fe76ec02308576c01bac9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/92b6df9e1b061c1adc5993c1f56e1cf0ed8cef4a78b71fab5fe1c20b06c29570.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a1a8260c8be6adc35936a1a7f827d9d9c33b62dcd166f5a97d4e59c0db544d86 +size 979 diff --git a/data/2025/2504_09xxx/2504.09795/images/957d84b5828bdd5a612e32c2651bbbda754f02c37807084e283aa9a19c30dc49.jpg b/data/2025/2504_09xxx/2504.09795/images/957d84b5828bdd5a612e32c2651bbbda754f02c37807084e283aa9a19c30dc49.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27627052f1ed046758ca9ab980292a62684ee860 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/957d84b5828bdd5a612e32c2651bbbda754f02c37807084e283aa9a19c30dc49.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:87d0daa34a12e4d92b5317aab35fd40797ae34f0c7484a9e71f22d94addc556d +size 6165 diff --git a/data/2025/2504_09xxx/2504.09795/images/97cc883868a778e50a29344b7815e08f67bf0882448e4cda19b58ca462572f5b.jpg b/data/2025/2504_09xxx/2504.09795/images/97cc883868a778e50a29344b7815e08f67bf0882448e4cda19b58ca462572f5b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2ab96b6f0bd80318fa27d309772235259a19ea73 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/97cc883868a778e50a29344b7815e08f67bf0882448e4cda19b58ca462572f5b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a8a645b5bd6ddf2483f62ae64d8ee3f99f0271d77537771d9840231bbdd030a8 +size 9283 diff --git a/data/2025/2504_09xxx/2504.09795/images/98ad0e6fc57ecfc3f03f5748718f4eb42c2e8f72d2583e53a9f04f4ec36cceff.jpg b/data/2025/2504_09xxx/2504.09795/images/98ad0e6fc57ecfc3f03f5748718f4eb42c2e8f72d2583e53a9f04f4ec36cceff.jpg new file mode 100644 index 0000000000000000000000000000000000000000..5d99d72f5cd8fcee1ebd04437e3fb1afb537b992 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/98ad0e6fc57ecfc3f03f5748718f4eb42c2e8f72d2583e53a9f04f4ec36cceff.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:89eace93afece83d4e6e9da9fcfda7f159b40379e9c9dbcdefd221c278155120 +size 978 diff --git a/data/2025/2504_09xxx/2504.09795/images/9ba945bae9646b0a77bd959c1908abe2702a7b534271c0c4831d92704d0e3d4d.jpg b/data/2025/2504_09xxx/2504.09795/images/9ba945bae9646b0a77bd959c1908abe2702a7b534271c0c4831d92704d0e3d4d.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d76aeafb3f053c88e522abbfa533d5a4b28cd52d --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/9ba945bae9646b0a77bd959c1908abe2702a7b534271c0c4831d92704d0e3d4d.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:170da7a6c2186560085739dad1eeb1bec6bcc2acaa3f9f54efb62e4f51e650cd +size 28220 diff --git a/data/2025/2504_09xxx/2504.09795/images/9f5bf16af03ad103a698c0321a9b58c638532434e40ee551f0fd81b372907c97.jpg b/data/2025/2504_09xxx/2504.09795/images/9f5bf16af03ad103a698c0321a9b58c638532434e40ee551f0fd81b372907c97.jpg new file mode 100644 index 0000000000000000000000000000000000000000..54e7dea613aa93437f015f1dc16742b7d7061d69 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/9f5bf16af03ad103a698c0321a9b58c638532434e40ee551f0fd81b372907c97.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:dc275a3d65195ac9c131dc99f19ea8c180ffbd0456e8e0933bfeb9fd68522c04 +size 20545 diff --git a/data/2025/2504_09xxx/2504.09795/images/a670a379606925aebe13fb798758eded19703da2df393c27de2a6123d0706960.jpg b/data/2025/2504_09xxx/2504.09795/images/a670a379606925aebe13fb798758eded19703da2df393c27de2a6123d0706960.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4658ad5104f480d17ffb15a82eac676455e003e6 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/a670a379606925aebe13fb798758eded19703da2df393c27de2a6123d0706960.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:427044be92a79c97d61ad08914948445bf4b15cb972dee1f17fc6e02e762f08e +size 46310 diff --git a/data/2025/2504_09xxx/2504.09795/images/ae6693ffead482e8053a2b2768b7491e91c23a4e6e1efa26e846487f7a2cfedf.jpg b/data/2025/2504_09xxx/2504.09795/images/ae6693ffead482e8053a2b2768b7491e91c23a4e6e1efa26e846487f7a2cfedf.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ca69c75730aea3b33ad28ac35661f16da36bc49d --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09795/images/ae6693ffead482e8053a2b2768b7491e91c23a4e6e1efa26e846487f7a2cfedf.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:96a667fadc08b21398bef4511ea1f5cf3eef271f3cd7e1289984fb6889b6c7b2 +size 983 diff --git a/data/2025/2504_09xxx/2504.09795/images/b0bd7c2f12f9f023e7bf5933be336cfa80c05bb664a078dd8f3292b12b05d72a.jpg b/data/2025/2504_09xxx/2504.09795/images/b0bd7c2f12f9f023e7bf5933be336cfa80c05bb664a078dd8f3292b12b05d72a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e3e29cb458566dd41fd5a040f4720c10cd65ef8e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/b0bd7c2f12f9f023e7bf5933be336cfa80c05bb664a078dd8f3292b12b05d72a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:8ec0914d6b2353e97149552aacab6ca1caa34b81dea4d20a5245a56c6efe1504 +size 10874 diff --git a/data/2025/2504_09xxx/2504.09795/images/bc117a74a492d448f6907f4399296864b7122b25adc16f67e341ba2813f33b24.jpg b/data/2025/2504_09xxx/2504.09795/images/bc117a74a492d448f6907f4399296864b7122b25adc16f67e341ba2813f33b24.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e80a076e2bec8350e98c2a2033d016acdcca6e9b --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/bc117a74a492d448f6907f4399296864b7122b25adc16f67e341ba2813f33b24.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fa2d1fac5aee7dc50b1d82df2d324d6a7817768911cb96ff5c8cbbb75f82411d +size 5412 diff --git a/data/2025/2504_09xxx/2504.09795/images/bc1de121b83276858f23fa75aa8f727e619999d63b1fc7668888226d99108051.jpg b/data/2025/2504_09xxx/2504.09795/images/bc1de121b83276858f23fa75aa8f727e619999d63b1fc7668888226d99108051.jpg new file mode 100644 index 0000000000000000000000000000000000000000..d06ced464b9068fb59894f27fecfd89d422e6d74 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/bc1de121b83276858f23fa75aa8f727e619999d63b1fc7668888226d99108051.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e413cf2f04a488a1a84acd676ed3913170d090886b962f6844c2c44943e855e3 +size 962 diff --git a/data/2025/2504_09xxx/2504.09795/images/c3e1f531e3d74c264ced8dd2963260bf24946c515cb9e7bea31a89d5669a150c.jpg b/data/2025/2504_09xxx/2504.09795/images/c3e1f531e3d74c264ced8dd2963260bf24946c515cb9e7bea31a89d5669a150c.jpg new file mode 100644 index 0000000000000000000000000000000000000000..8e689bc04aff2cea921c712eef178fb00ac23cc9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/c3e1f531e3d74c264ced8dd2963260bf24946c515cb9e7bea31a89d5669a150c.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:69c47565117bd72072f335ec2855b3078992bcf7b6ce27df100ca2a9551a5f8d +size 21794 diff --git a/data/2025/2504_09xxx/2504.09795/images/c9b9fa6f8fb4c7743af1f2b25068bbfacddcdca27874fb27b620b98c9518579a.jpg b/data/2025/2504_09xxx/2504.09795/images/c9b9fa6f8fb4c7743af1f2b25068bbfacddcdca27874fb27b620b98c9518579a.jpg new file mode 100644 index 0000000000000000000000000000000000000000..e2b9a34b969d5e3a65875a31fc071225a4996ab1 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/c9b9fa6f8fb4c7743af1f2b25068bbfacddcdca27874fb27b620b98c9518579a.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:527afb538f0194ee4048c3ffae9353a9b152e158870ed6b1d24c2395f10fcb4c +size 935 diff --git a/data/2025/2504_09xxx/2504.09795/images/cb5bfec95a153aac6c77d40d97d65aefdbbaadf610a541398d6e0b294385ba9e.jpg 
b/data/2025/2504_09xxx/2504.09795/images/cb5bfec95a153aac6c77d40d97d65aefdbbaadf610a541398d6e0b294385ba9e.jpg new file mode 100644 index 0000000000000000000000000000000000000000..4f7ddf61f973f535ee8da933745a810fffa1034f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/cb5bfec95a153aac6c77d40d97d65aefdbbaadf610a541398d6e0b294385ba9e.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:e7df7b7192fbac8192a1ca81b4e3333d6e969f46b01d2479dfd90e75a3bea392 +size 21888 diff --git a/data/2025/2504_09xxx/2504.09795/images/d3a6583b9ab0f54d816ebe6f818bb357823c7fc0b4fbdeaae0b828e9de90fee4.jpg b/data/2025/2504_09xxx/2504.09795/images/d3a6583b9ab0f54d816ebe6f818bb357823c7fc0b4fbdeaae0b828e9de90fee4.jpg new file mode 100644 index 0000000000000000000000000000000000000000..7ef3e41fce24cf6c2f0eb765b4ecb00fab43d4f5 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/d3a6583b9ab0f54d816ebe6f818bb357823c7fc0b4fbdeaae0b828e9de90fee4.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bbb4c8af6b6d95c498d71f8a47d54d837a52bb286d631a605e1efa8241f8aaef +size 117559 diff --git a/data/2025/2504_09xxx/2504.09795/images/d4e697531784a0bdf0197a13e8f684df42e426dd4eef7cbab43178197d461901.jpg b/data/2025/2504_09xxx/2504.09795/images/d4e697531784a0bdf0197a13e8f684df42e426dd4eef7cbab43178197d461901.jpg new file mode 100644 index 0000000000000000000000000000000000000000..b6550ec455986f5465ae06bf3d8aea7976ca8238 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/d4e697531784a0bdf0197a13e8f684df42e426dd4eef7cbab43178197d461901.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:a0c0cc280b6430354e0f6a98320e30f194fc53cdb4b8d5f66fb39ec95c7e0c38 +size 17468 diff --git a/data/2025/2504_09xxx/2504.09795/images/daa3016ce5d5e7782cea565c8faa26b1c6efa12759c7e13273fa9d8ffc3e6863.jpg b/data/2025/2504_09xxx/2504.09795/images/daa3016ce5d5e7782cea565c8faa26b1c6efa12759c7e13273fa9d8ffc3e6863.jpg new file mode 100644 index 0000000000000000000000000000000000000000..2478f44cad3ec6da831315190cc998583dbabce4 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/daa3016ce5d5e7782cea565c8faa26b1c6efa12759c7e13273fa9d8ffc3e6863.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:70fff95f7cd3e97b25ff34d8af1a99e20d51dcca2748ba83e6cbfcd809565ece +size 26999 diff --git a/data/2025/2504_09xxx/2504.09795/images/dafe7f71f690b5d09e728783c8143871a4e73e05a3f58810e902020a4d0ef7af.jpg b/data/2025/2504_09xxx/2504.09795/images/dafe7f71f690b5d09e728783c8143871a4e73e05a3f58810e902020a4d0ef7af.jpg new file mode 100644 index 0000000000000000000000000000000000000000..a572c8c0bdd3084302e5de4f9307c42cd17ef833 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/dafe7f71f690b5d09e728783c8143871a4e73e05a3f58810e902020a4d0ef7af.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54253443b17d6d96cc2d5ba56faf047ba84e6342a7b3c190af53f0a1d7adffe4 +size 917 diff --git a/data/2025/2504_09xxx/2504.09795/images/dbae51a8466af1104918b799e875239d66dc9bedd9befb480bbfb0a5a8186151.jpg b/data/2025/2504_09xxx/2504.09795/images/dbae51a8466af1104918b799e875239d66dc9bedd9befb480bbfb0a5a8186151.jpg new file mode 100644 index 0000000000000000000000000000000000000000..26bfa6262b8e5cbaf3a99847fb15b3f98bf8c2bf --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/dbae51a8466af1104918b799e875239d66dc9bedd9befb480bbfb0a5a8186151.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid 
sha256:26ed268b6c3312337cbfab19ac25042d6906fc06d735a257528dc02c5d9fbbf7 +size 80882 diff --git a/data/2025/2504_09xxx/2504.09795/images/df449c705825c0ab0b8705e5bf0f02771596144cbceb6c4291677a1ba17ed475.jpg b/data/2025/2504_09xxx/2504.09795/images/df449c705825c0ab0b8705e5bf0f02771596144cbceb6c4291677a1ba17ed475.jpg new file mode 100644 index 0000000000000000000000000000000000000000..31d86faed5a02f74bc06564730647de34535f671 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/df449c705825c0ab0b8705e5bf0f02771596144cbceb6c4291677a1ba17ed475.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:5b587ac9492e8612c9c2c3e9372568c30cb781e850973103c99bb69996fefa12 +size 12390 diff --git a/data/2025/2504_09xxx/2504.09795/images/e63e20d4fea3336cf279de0bd01cde6d0aefe471c63e1b34f107f84be90e7006.jpg b/data/2025/2504_09xxx/2504.09795/images/e63e20d4fea3336cf279de0bd01cde6d0aefe471c63e1b34f107f84be90e7006.jpg new file mode 100644 index 0000000000000000000000000000000000000000..3e2543b69779661bedd7e98b2e79648e04b93480 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/e63e20d4fea3336cf279de0bd01cde6d0aefe471c63e1b34f107f84be90e7006.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:afebcd5ba85976535ac1aef243751862f4f52f1a48d12e11a721bd2c209eb73d +size 12472 diff --git a/data/2025/2504_09xxx/2504.09795/images/e6b5f824fb272dd5a3145c39a1ecf37d59a1fd8717888828b2d0f0e211e90863.jpg b/data/2025/2504_09xxx/2504.09795/images/e6b5f824fb272dd5a3145c39a1ecf37d59a1fd8717888828b2d0f0e211e90863.jpg new file mode 100644 index 0000000000000000000000000000000000000000..33eb4e2e5717cf66ce87d9274bc51d8bcad0d596 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/e6b5f824fb272dd5a3145c39a1ecf37d59a1fd8717888828b2d0f0e211e90863.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:b3b703d0fcb234983873c4e2e22cdb7d3d69834ce0007e2eb93359af831a32ee +size 117026 diff --git a/data/2025/2504_09xxx/2504.09795/images/e9f6bab9c5e222d059bdff8614152323cb97a9ba24e7c9b7f1263981d7f9fd2b.jpg b/data/2025/2504_09xxx/2504.09795/images/e9f6bab9c5e222d059bdff8614152323cb97a9ba24e7c9b7f1263981d7f9fd2b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..27b02d9ba04b688d79a0872944a2776f62d700e9 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/e9f6bab9c5e222d059bdff8614152323cb97a9ba24e7c9b7f1263981d7f9fd2b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:54a0d5d7870ca93201921704f96001706b87bcbd4708bb07ef95199a6d1673a0 +size 55483 diff --git a/data/2025/2504_09xxx/2504.09795/images/f449f5484e1694c7bfff1780be558bd32c7a6c684713be27c6d42f4267bddc73.jpg b/data/2025/2504_09xxx/2504.09795/images/f449f5484e1694c7bfff1780be558bd32c7a6c684713be27c6d42f4267bddc73.jpg new file mode 100644 index 0000000000000000000000000000000000000000..21d118c3e70178a754a5cfe1df4919b99e4da54f --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/f449f5484e1694c7bfff1780be558bd32c7a6c684713be27c6d42f4267bddc73.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:251ace0066e11e116b4b5905bc30050cb3a9a22d3a1aca1935fc1ba2aef577f6 +size 17861 diff --git a/data/2025/2504_09xxx/2504.09795/images/f629faeeb9a32204f44c19802ec06137e3e8e181cebc80a8db81f829d802e5c6.jpg b/data/2025/2504_09xxx/2504.09795/images/f629faeeb9a32204f44c19802ec06137e3e8e181cebc80a8db81f829d802e5c6.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f252de0beebbb169740427249b1511c507e12c7b --- /dev/null +++ 
b/data/2025/2504_09xxx/2504.09795/images/f629faeeb9a32204f44c19802ec06137e3e8e181cebc80a8db81f829d802e5c6.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:f9ec71aae8984f043ae52b0270a0bafaf6df9631344e85fcfac9783cfb3fdcdb +size 40670 diff --git a/data/2025/2504_09xxx/2504.09795/images/f73482c9c890084652a720744d7d93804bedd1871b4d6c21d20525e9e5aba77b.jpg b/data/2025/2504_09xxx/2504.09795/images/f73482c9c890084652a720744d7d93804bedd1871b4d6c21d20525e9e5aba77b.jpg new file mode 100644 index 0000000000000000000000000000000000000000..078e1be3970c694d7dab6c2a3bf68a900f4be33e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/f73482c9c890084652a720744d7d93804bedd1871b4d6c21d20525e9e5aba77b.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:bb03c600f762e749f2c1258dbbb68a87e017e88730aed1aa1059bef838c90dbe +size 40252 diff --git a/data/2025/2504_09xxx/2504.09795/images/f896e0eb27f884677490aa86ef18df4953d23755fa8e44a9ccdc08bc604d0195.jpg b/data/2025/2504_09xxx/2504.09795/images/f896e0eb27f884677490aa86ef18df4953d23755fa8e44a9ccdc08bc604d0195.jpg new file mode 100644 index 0000000000000000000000000000000000000000..ae162dd74a2d509263352190fc734370bd797227 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/f896e0eb27f884677490aa86ef18df4953d23755fa8e44a9ccdc08bc604d0195.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:fb60357931ecc3752ad4287048533794b56d08d7b2c108f9a85e8951468cf9da +size 906 diff --git a/data/2025/2504_09xxx/2504.09795/images/fb412815b242fc2172d8d550dc1d382145390815e4d89360f0f95a5919c3c6c0.jpg b/data/2025/2504_09xxx/2504.09795/images/fb412815b242fc2172d8d550dc1d382145390815e4d89360f0f95a5919c3c6c0.jpg new file mode 100644 index 0000000000000000000000000000000000000000..23eef16ee1e196c8d1f99db190b2f61b3dea8b49 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/fb412815b242fc2172d8d550dc1d382145390815e4d89360f0f95a5919c3c6c0.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:9ec7d8ff20b8d9d4518c990eba6fcf15e62603795c24a9412da9d3aeff02e47c +size 20932 diff --git a/data/2025/2504_09xxx/2504.09795/images/fd6d7dd7029c1a3057e13b3b9406c6fc9423887c3cc2e59a07e224f78adf5288.jpg b/data/2025/2504_09xxx/2504.09795/images/fd6d7dd7029c1a3057e13b3b9406c6fc9423887c3cc2e59a07e224f78adf5288.jpg new file mode 100644 index 0000000000000000000000000000000000000000..f3739442f3863264858ae99d0eca9318e214a92e --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/fd6d7dd7029c1a3057e13b3b9406c6fc9423887c3cc2e59a07e224f78adf5288.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:c51874cc9eafc454e2370a853a3568e4a311edbcb17a69db3359b666f62ee103 +size 9760 diff --git a/data/2025/2504_09xxx/2504.09795/images/fe65f01872bf00c487f537d7f1b317c96711c617b5febf57e0c36686929945c1.jpg b/data/2025/2504_09xxx/2504.09795/images/fe65f01872bf00c487f537d7f1b317c96711c617b5febf57e0c36686929945c1.jpg new file mode 100644 index 0000000000000000000000000000000000000000..04f743b5ec576c740d766b81ca0f821833cc7415 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/images/fe65f01872bf00c487f537d7f1b317c96711c617b5febf57e0c36686929945c1.jpg @@ -0,0 +1,3 @@ +version https://git-lfs.github.com/spec/v1 +oid sha256:63c9d786f731c9101d488ada933abd3468dea26e3a2a37b5449599d0dd8e8363 +size 42100 diff --git a/data/2025/2504_09xxx/2504.09795/layout.json b/data/2025/2504_09xxx/2504.09795/layout.json new file mode 100644 index 
0000000000000000000000000000000000000000..e19321ceff2b49742ad7bbd52290046cc56fbed3 --- /dev/null +++ b/data/2025/2504_09xxx/2504.09795/layout.json @@ -0,0 +1,13105 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "spans": [ + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "text", + "content": "Ryota Tanaka" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "inline_equation", + "content": "^{1,2}" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "text", + "content": " Taichi Iki" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "text", + "content": " Taku Hasegawa" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "text", + "content": " Kyosuke Nishida" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "text", + "content": " Kuniko Saito" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "text", + "content": " Jun Suzuki" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "inline_equation", + "content": "^{1}" + }, + { + "bbox": [ + 61, + 145, + 558, + 190 + ], + "type": "text", + "content": "NTT Human Informatics Laboratories, NTT Corporation \nhttps://vdocrag.github.io" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 151, + 216, + 200, + 228 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 151, + 216, + 200, + 228 + ], + "spans": [ + { + "bbox": [ + 151, + 216, + 200, + 228 + ], + "type": "text", + "content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 240, + 297, + 504 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 240, + 297, + 504 + ], + "spans": [ + { + "bbox": [ + 54, + 240, + 297, + 504 + ], + "type": "text", + "content": "We aim to develop a retrieval-augmented generation (RAG) framework that answers questions over a corpus of visually-rich documents presented in mixed modalities (e.g., charts, tables) and diverse formats (e.g., PDF, PPTX). In this paper, we introduce a new RAG framework, VDocRAG, which can directly understand varied documents and modalities in a unified image format to prevent missing information that occurs by parsing documents to obtain text. To improve the performance, we propose novel self-supervised pre-training tasks that adapt large vision-language models for retrieval by compressing visual information into dense token representations while aligning them with textual content in documents. Furthermore, we introduce OpenDocVQA, the first unified collection of open-domain document visual question answering datasets, encompassing diverse document types and formats. OpenDocVQA provides a comprehensive resource for training and evaluating retrieval and question answering models on visually-rich documents in an open-domain setting. 
Experiments show that VDocRAG substantially outperforms conventional text-based RAG and has strong generalization capability, highlighting the potential of an effective RAG paradigm for real-world documents." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 525, + 135, + 538 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 525, + 135, + 538 + ], + "spans": [ + { + "bbox": [ + 55, + 525, + 135, + 538 + ], + "type": "text", + "content": "1. Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 54, + 545, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 545, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 54, + 545, + 295, + 713 + ], + "type": "text", + "content": "Large language models (LLMs) have demonstrated impressive performance on diverse natural language tasks [2, 16, 24, 55]. These models struggle with factual errors despite their increased model and data scale [39, 40]. To remedy this problem, retrieval-augmented generation (RAG) methods [18, 31] can retrieve knowledge from an external corpus, potentially reducing hallucination and increasing knowledge coverage. Most previous RAG frameworks assume the context is composed entirely of text, with no graphical elements. In contrast, a significant amount of real-world information is stored in visually-rich documents, such as charts, tables, web pages, and office documents. These documents often contain both textual and visual objects, with content spread structurally across various loca" + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 321, + 217, + 547, + 406 + ], + "blocks": [ + { + "bbox": [ + 321, + 217, + 547, + 406 + ], + "lines": [ + { + "bbox": [ + 321, + 217, + 547, + 406 + ], + "spans": [ + { + "bbox": [ + 321, + 217, + 547, + 406 + ], + "type": "image", + "image_path": "dbae51a8466af1104918b799e875239d66dc9bedd9befb480bbfb0a5a8186151.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 313, + 419, + 555, + 464 + ], + "lines": [ + { + "bbox": [ + 313, + 419, + 555, + 464 + ], + "spans": [ + { + "bbox": [ + 313, + 419, + 555, + 464 + ], + "type": "text", + "content": "Figure 1. Our framework of VDocRAG and examples from OpenDocVQA. VDocRAG consists of VDocRetirver and VDocGenerator, which can retrieve relevant documents and generate answers by understanding the original appearance of documents." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 486, + 500, + 498 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 486, + 500, + 498 + ], + "spans": [ + { + "bbox": [ + 313, + 486, + 500, + 498 + ], + "type": "text", + "content": "tions depending on diverse formats and types." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 312, + 498, + 555, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 498, + 555, + 689 + ], + "spans": [ + { + "bbox": [ + 312, + 498, + 555, + 689 + ], + "type": "text", + "content": "Thus, document visual question answering (DocumentVQA) [42, 43, 56, 57] aims to build an agent capable of reading and comprehending document images to answer the question. Here, most existing DocumentVQA questions operate in a closed setting without requiring any retrieval. 
While this definition simplifies the QA model, it does not reflect many real-world use cases where the question is asked through some open-domain natural language interface, such as QA systems searching information across in-house documents or customer service chatbots on e-commerce websites. To address this limitation, recent works have introduced retrieval tasks on document images [17, 37]. However, these cannot fully develop models that effectively integrate the retrieved information into the final output. This gap hinders the application of DocumentVQA models in more realistic, open-domain scenarios." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 553, + 713 + ], + "type": "text", + "content": "In this paper, we introduce a new RAG framework, VDocRAG, which can directly understand varied docu" + } + ] + } + ], + "index": 12 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 14, + 210, + 37, + 560 + ], + "type": "aside_text", + "angle": 270, + "lines": [ + { + "bbox": [ + 14, + 210, + 37, + 560 + ], + "spans": [ + { + "bbox": [ + 14, + 210, + 37, + 560 + ], + "type": "text", + "content": "arXiv:2504.09795v1 [cs.CL] 14 Apr 2025" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 60, + 98, + 81, + 121 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 60, + 98, + 81, + 121 + ], + "spans": [ + { + "bbox": [ + 60, + 98, + 81, + 121 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 82, + 106, + 550, + 124 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 106, + 550, + 124 + ], + "spans": [ + { + "bbox": [ + 82, + 106, + 550, + 124 + ], + "type": "text", + "content": "VDocRAG: Retrieval-Augmented Generation over Visually-Rich Documents" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 55, + 72, + 294, + 323 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 72, + 294, + 323 + ], + "spans": [ + { + "bbox": [ + 55, + 72, + 294, + 323 + ], + "type": "text", + "content": "ments and modalities in a unified image format to avoid tedious parsing and potential information loss that occurs in conventional text-based RAG. As depicted in Figure 1, VDocRAG consists of two main components, both of which effectively leverage the visual features of documents. First, VDocRetriever retrieves document images related to the question from a corpus of document images. Second, VDocGenerator uses these retrieved images to generate the answer. To encode document images and interact with the encoded information, we adapt pre-trained large vision language models (LVLMs) [1, 29] as the backbone for VDocRAG. Since LVLMs are inherently generative models, it is sub-optimal for embeddings as they prevent the representations from capturing information across the entire input sequence due to the training objective (i.e., next-token prediction). To bridge this gap, we introduce new self-supervised pre-training tasks that harness the understanding and generation capabilities of LVLMs to enhance representation learning. Specifically, we compress the entire image representation into a dense token representation, by aligning the text in documents via retrieval and generation tasks." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 323, + 294, + 430 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 323, + 294, + 430 + ], + "spans": [ + { + "bbox": [ + 55, + 323, + 294, + 430 + ], + "type": "text", + "content": "Furthermore, we introduce OpenDocVQA, the first unified collection of open-domain DocumentVQA datasets encompassing a wide range of document types and formats. OpenDocVQA provides a comprehensive resource for training and evaluating retrieval and question answering models on visually-rich documents in an open-domain setting. Experiments demonstrate that VDocRAG substantially outperforms conventional text-based RAG and has strong generalization performance." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 431, + 276, + 442 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 431, + 276, + 442 + ], + "spans": [ + { + "bbox": [ + 67, + 431, + 276, + 442 + ], + "type": "text", + "content": "Our main contributions are summarized as follows:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 443, + 295, + 538 + ], + "type": "list", + "angle": 0, + "index": 6, + "blocks": [ + { + "bbox": [ + 55, + 443, + 295, + 479 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 443, + 295, + 479 + ], + "spans": [ + { + "bbox": [ + 55, + 443, + 295, + 479 + ], + "type": "text", + "content": "- We introduce a new RAG framework, VDocRAG, which can directly understand diverse real-world documents purely from visual features." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 479, + 295, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 479, + 295, + 514 + ], + "spans": [ + { + "bbox": [ + 55, + 479, + 295, + 514 + ], + "type": "text", + "content": "- We are the first to explore pre-training tasks designed for document retrieval-oriented adaptation of LVLMs, by compressing visual document representations." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 515, + 295, + 538 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 515, + 295, + 538 + ], + "spans": [ + { + "bbox": [ + 55, + 515, + 295, + 538 + ], + "type": "text", + "content": "- We introduce OpenDocVQA, the first unified open-domain DocumentVQA dataset with diverse documents." + } + ] + } + ], + "index": 5 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 549, + 142, + 562 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 549, + 142, + 562 + ], + "spans": [ + { + "bbox": [ + 55, + 549, + 142, + 562 + ], + "type": "text", + "content": "2. Related Work" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 295, + 713 + ], + "type": "text", + "content": "Retrieval-augmented generation (RAG). RAG in the NLP community aims at retrieving external knowledge to reduce factual errors and enhance performance in various knowledge-intensive tasks [3, 5, 39, 40, 49]. Inspired by the success of RAG in NLP, this technique has also applied applications across different domains, including images [8, 50, 51, 64], codes [45, 70], videos [7, 61], audio [26, 62], and 3D [53, 69]. However, most existing works have focused on retrieving knowledge from only plain-text documents or non-text media. 
In contrast, we tackle the challenge of extracting knowledge from visually-rich documents organized in complex, multimodal formats." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 72, + 555, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 72, + 555, + 359 + ], + "spans": [ + { + "bbox": [ + 313, + 72, + 555, + 359 + ], + "type": "text", + "content": "Visual document retrieval and visual RAG. With the success of LLMs, there is a growing trend to build large vision language models (LVLMs) that integrate image understanding capabilities by combining image encoders [32, 48, 67] with LLMs [1, 10, 29, 33, 35, 58]. Concurrent works in visual document retrieval [13, 17, 37] and visual RAG [9, 38, 66] leverage LVLMs to directly encode visually-rich documents through images. However, these approaches have trouble understanding diverse real-world documents due to the limitations of their datasets and training strategies. The existing visual document retrieval dataset, ViDoRe [37], contains questions that might not require retrieval and handles a limited number of document types, resulting in a gap between real-world scenarios. In contrast, our dataset covers open document types and provides questions that are verified by humans to require retrieval and to have context-independent conditions for the retrieval. From the perspective of training, despite the significant gap between generative pre-training tasks and retrieval tasks in LVLMs, previous works [9, 17, 37, 38, 66] leverage LVLMs without specific training for bridging the gap. To address this, we introduce pre-training tasks that transfer the understanding and generation capabilities of LVLMs to retrievers." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 376, + 555, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 376, + 555, + 605 + ], + "spans": [ + { + "bbox": [ + 313, + 376, + 555, + 605 + ], + "type": "text", + "content": "Document visual question answering (DocumentVQA). DocumentVQA is a high-level document understanding task that involves answering questions on visually-rich documents. These documents include a variety of elements, such as handwritten and digital text [42, 56], complex layouts [28, 68, 71], and graphical elements [41, 43, 57]. However, previous studies have assumed closed settings that do not require retrieval, except for Dureader_vis [46]. Our work differs from Dureader_vis as follows. First, OpenDocVQA covers a wide range of document formats and domains, while Dureader_vis focuses on screenshots of websites, limiting its generalizability. Second, OpenDocVQA reflects more real-world scenarios that require both single- and multi-hop reasoning over documents, while Dureader_vis requires only single-hop reasoning. Lastly, even lexical search methods yield sufficient performance in Dureader_vis due to its reliance on textual content. In contrast, OpenDocVQA requires a visual semantic search where visual and contextual information can be exploited." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 616, + 495, + 629 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 616, + 495, + 629 + ], + "spans": [ + { + "bbox": [ + 313, + 616, + 495, + 629 + ], + "type": "text", + "content": "3. 
OpenDocVQA Task and Dataset" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 635, + 421, + 647 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 635, + 421, + 647 + ], + "spans": [ + { + "bbox": [ + 313, + 635, + 421, + 647 + ], + "type": "text", + "content": "3.1. Task Formulation" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": "Given a large collection of " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": " document images " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{I} = \\{I_1,\\dots,I_N\\}" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": " and a question " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": ", the goal of OpenDocVQA task is to output an answer " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": " by finding the relevant " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": " images " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{I}}\\in \\mathcal{I}" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "k\\ll N" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": ". We decompose the task into two stages. Visual document retrieval: given " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 313, + 653, + 555, + 715 + ], + "type": "text", + "content": "," + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 71, + 303, + 189 + ], + "blocks": [ + { + "bbox": [ + 58, + 71, + 303, + 189 + ], + "lines": [ + { + "bbox": [ + 58, + 71, + 303, + 189 + ], + "spans": [ + { + "bbox": [ + 58, + 71, + 303, + 189 + ], + "type": "image", + "image_path": "f629faeeb9a32204f44c19802ec06137e3e8e181cebc80a8db81f829d802e5c6.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 198, + 295, + 210 + ], + "lines": [ + { + "bbox": [ + 55, + 198, + 295, + 210 + ], + "spans": [ + { + "bbox": [ + 55, + 198, + 295, + 210 + ], + "type": "text", + "content": "Figure 2. Process of creating multi-hop DocumentVQA questions." 
+ } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "spans": [ + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "text", + "content": "the model retrieves the relevant " + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "text", + "content": " images " + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{I}}" + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "text", + "content": " from which to derive the answer. DocumentVQA: the model takes " + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "text", + "content": " and the retrieved images " + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{I}}" + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "text", + "content": " as input, to generate " + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 55, + 229, + 295, + 266 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 266, + 296, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 266, + 296, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 266, + 296, + 361 + ], + "type": "text", + "content": "OpenDocVQA covers multiple open-domain DocumentVQA datasets with diverse document types. To reflect real-world scenarios, we evaluate models with both single-pool and all-pool settings. In the single-pool setting, retrieval is performed from a specific pool of documents provided by each original dataset. The all-pool setting requires retrieving from the entire candidate pool, which includes documents from a wide range of domains." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 369, + 164, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 369, + 164, + 380 + ], + "spans": [ + { + "bbox": [ + 55, + 369, + 164, + 380 + ], + "type": "text", + "content": "3.2. Dataset Collection" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 386, + 296, + 517 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 386, + 296, + 517 + ], + "spans": [ + { + "bbox": [ + 55, + 386, + 296, + 517 + ], + "type": "text", + "content": "Filtering of DocumentVQA datasets. We collected and filtered instances of seven existing document VQA datasets [28, 41-43, 56, 57, 68]. Most of their questions are context-dependent conditions, where they cannot be answered without referencing the accompanying document (e.g., What is the title?). Therefore, we filtered out questions lacking sufficient context for retrieval. To address this, we initially applied heuristic rules to automatically select likely context-independent questions, reducing the pool by " + }, + { + "bbox": [ + 55, + 386, + 296, + 517 + ], + "type": "inline_equation", + "content": "20.9\\%" + }, + { + "bbox": [ + 55, + 386, + 296, + 517 + ], + "type": "text", + "content": ". Then, we manually reviewed and verified the remaining examples to ensure their context independence." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 55, + 532, + 296, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 532, + 296, + 616 + ], + "spans": [ + { + "bbox": [ + 55, + 532, + 296, + 616 + ], + "type": "text", + "content": "Reformulation of TableQA dataset. We used QA pairs from Open-WikiTable [27], an open-domain TableQA dataset that required retrieving tables from Wikipedia to answer the question. Since the original dataset provides tables in only textual format (HTML data), we took the screenshot images of tables from the corresponding Wikipedia pages to reformulate the task as the OpenDocVQA." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 629, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 296, + 715 + ], + "type": "text", + "content": "Creation of new multi-hop questions. To enhance the model's ability to interact with multiple document sources (e.g., charts and tables), we semi-automatically created a multi-hop DocumentVQA dataset, MHDocVQA, using the single-hop QA pairs collected in the previous steps. As shown in Figure 2, the creating process involved the following steps: (1) We first used spaCy [19] to identify a bridge" + } + ] + } + ], + "index": 7 + }, + { + "type": "table", + "bbox": [ + 317, + 70, + 554, + 189 + ], + "blocks": [ + { + "bbox": [ + 317, + 70, + 554, + 189 + ], + "lines": [ + { + "bbox": [ + 317, + 70, + 554, + 189 + ], + "spans": [ + { + "bbox": [ + 317, + 70, + 554, + 189 + ], + "type": "table", + "html": "
ViDoRe [17]Dureadervis [46]OpenDocVQA
Retrieval
QA
Context-Independent
Visual Semantic Search
Multi-Hop
Document ContentsT, L, F, C, DT, LT, L, F, C, D
Answer Types-ExtExt, Abs
#Document Types61Open
#QAs3,81015,00043,474
#Images (Pages)8,310158,000206,267
", + "image_path": "00e4521b82c8ff62a3115b30dc25ed033f6dacd64ce4acc20b5c55599e537218.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "table_body" + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 197, + 555, + 230 + ], + "lines": [ + { + "bbox": [ + 313, + 197, + 555, + 230 + ], + "spans": [ + { + "bbox": [ + 313, + 197, + 555, + 230 + ], + "type": "text", + "content": "Table 1. Comparison of related datasets. Document contents include (T)able, (L)ist, (F)igure, (C)hart, and (D)iagram. Answer types are Extractive (Ext) and Abstractive (Abs)." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 254, + 555, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 254, + 555, + 386 + ], + "spans": [ + { + "bbox": [ + 313, + 254, + 555, + 386 + ], + "type": "text", + "content": "entity (e.g., Denmark) in the answer to a single-hop question and then searched for this entity in other single-hop questions. (2) Next, we used Mixtral-8x22B [24] to combine the two single-hop questions. (3) We filtered the generated multi-hop questions using another LLM (GPT-4o [2]), which answered the questions based on the context of the two initial single-hop questions and their answers. If the predicted answer was the same as the answer to the second single-hop question, the multi-hop question was validated. Finally, we manually reviewed the filtered questions to ensure their quality before including them in our dataset." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 407, + 554, + 492 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 407, + 554, + 492 + ], + "spans": [ + { + "bbox": [ + 313, + 407, + 554, + 492 + ], + "type": "text", + "content": "Negative candidates mining. We produced negative image candidates for retrievers to sift through for every question, used only during inference. We first extracted OCR text from images in the COYO-700M dataset [6], a web-scaled image collection. Subsequently, we mined negative images where the OCR text exhibits high lexical overlap with the question but does not contain the correct answer." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 503, + 500, + 516 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 503, + 500, + 516 + ], + "spans": [ + { + "bbox": [ + 313, + 503, + 500, + 516 + ], + "type": "text", + "content": "3.3. Comparison with Related Datasets" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 521, + 555, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 521, + 555, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 521, + 555, + 715 + ], + "type": "text", + "content": "Table 1 shows the statistics of OpenDocVQA and other related datasets, including ViDoRe [17] and Dureader_vis [46]. OpenDocVQA has three unique key properties: First, it is the first large-scale collection of open-domain DocumentVQA datasets to address open document types, whereas ViDoRe considers six document types for only the retrieval task and Dureader_vis is limited to webpages. Second, the questions in OpenDocVQA are context-independent and require visual semantic search, whereas ViDoRe's questions are context-dependent, and even lexical search methods yield sufficient performance in Dureader_vis. This indicates our dataset better reflects real-world scenarios. 
Lastly, unlike ViDoRe and Dureader_vis, OpenDocVQA requires multi-hop reasoning with extractive (e.g., span, list) and abstractive (e.g., arithmetic, counting, no answer) answer types, providing a more challenging setting." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 69, + 71, + 302, + 208 + ], + "blocks": [ + { + "bbox": [ + 69, + 71, + 302, + 208 + ], + "lines": [ + { + "bbox": [ + 69, + 71, + 302, + 208 + ], + "spans": [ + { + "bbox": [ + 69, + 71, + 302, + 208 + ], + "type": "image", + "image_path": "621d716df05ee2f430077f5046584325bfcf604ee584d3e4693b631080d1dc7e.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 55, + 220, + 555, + 243 + ], + "lines": [ + { + "bbox": [ + 55, + 220, + 555, + 243 + ], + "spans": [ + { + "bbox": [ + 55, + 220, + 555, + 243 + ], + "type": "text", + "content": "Figure 3. Overview of our VDocRAG model. VDocRetriever retrieves document images related to the question from a corpus of document images, and VDocGenerator uses these retrieved images to generate the answer." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 305, + 72, + 538, + 209 + ], + "blocks": [ + { + "bbox": [ + 305, + 72, + 538, + 209 + ], + "lines": [ + { + "bbox": [ + 305, + 72, + 538, + 209 + ], + "spans": [ + { + "bbox": [ + 305, + 72, + 538, + 209 + ], + "type": "image", + "image_path": "fe65f01872bf00c487f537d7f1b317c96711c617b5febf57e0c36686929945c1.jpg" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "bbox": [ + 55, + 262, + 154, + 276 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 262, + 154, + 276 + ], + "spans": [ + { + "bbox": [ + 55, + 262, + 154, + 276 + ], + "type": "text", + "content": "4. Proposed Model" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 55, + 282, + 185, + 295 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 282, + 185, + 295 + ], + "spans": [ + { + "bbox": [ + 55, + 282, + 185, + 295 + ], + "type": "text", + "content": "4.1. Architecture Overview" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 301, + 296, + 361 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 301, + 296, + 361 + ], + "spans": [ + { + "bbox": [ + 55, + 301, + 296, + 361 + ], + "type": "text", + "content": "As shown in Figure 3, VDocRAG consists of two components: VDocRetriever and VDocGenerator. Our approach adopts the pre-trained LVLMs to unify the varied formats and modalities in a single form as an image for direct document understanding." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 380, + 296, + 477 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 380, + 296, + 477 + ], + "spans": [ + { + "bbox": [ + 54, + 380, + 296, + 477 + ], + "type": "text", + "content": "Dynamic high-resolution image encoding. To encode high-resolution images with various aspect ratios, a dynamic cropping [14, 65] is utilized to split the image into smaller patches while maintaining the integrity of the original aspect ratio. 
Each patch is a small image with " + }, + { + "bbox": [ + 54, + 380, + 296, + 477 + ], + "type": "inline_equation", + "content": "336 \\times 336" + }, + { + "bbox": [ + 54, + 380, + 296, + 477 + ], + "type": "text", + "content": " size, and we treat them as individual inputs for the image encoder. After encoding images, we convert them via a projector (two-layer MLP) into visual document features " + }, + { + "bbox": [ + 54, + 380, + 296, + 477 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{\\mathrm{d}}" + }, + { + "bbox": [ + 54, + 380, + 296, + 477 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "spans": [ + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "content": "VDocRetriever. VDocRetriever is an LVLM-based dual-encoder architecture that encodes queries and document images independently. We append an " + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "inline_equation", + "content": "<\\mathrm{EOS}>" + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "content": " token to the end of the question and visual document features " + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "inline_equation", + "content": "\\mathbf{z}_{\\mathrm{d}}" + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "content": ", and then feed them into the LLM to obtain the question and visual document embeddings " + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "inline_equation", + "content": "(\\mathbf{h}_{\\mathrm{q}}, \\mathbf{h}_{\\mathrm{d}})" + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "content": " by taking the last layer " + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "inline_equation", + "content": "<\\mathrm{EOS}>" + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "content": " vector. Then, it retrieves " + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "content": " documents " + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{I}}" + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "content": " with the " + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "content": " highest similarity scores to the question. Formally, the similarity scores between the question and visual document embeddings are computed via maximum inner product search [15], as follows: " + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "inline_equation", + "content": "\\mathrm{SIM}(\\mathbf{h}_{\\mathrm{q}}, \\mathbf{h}_{\\mathrm{d}}) = \\frac{\\mathbf{h}_{\\mathrm{q}}^{\\top} \\mathbf{h}_{\\mathrm{d}}}{\\|\\mathbf{h}_{\\mathrm{q}}\\| \\|\\mathbf{h}_{\\mathrm{d}}\\|}" + }, + { + "bbox": [ + 55, + 495, + 296, + 636 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "content": "VDocGenerator. 
VDocGenerator adapts LVLM to generate answers " + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "inline_equation", + "content": "A" + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "content": " given the question " + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "content": " and the retrieved " + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "content": " documents " + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "inline_equation", + "content": "\\hat{\\mathcal{I}}" + }, + { + "bbox": [ + 55, + 653, + 296, + 713 + ], + "type": "text", + "content": " obtained from VDocRetriever. After encoding the retrieval result, we concatenate the question and the encoded result, then feed this combined input into the LLM." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 313, + 262, + 499, + 276 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 262, + 499, + 276 + ], + "spans": [ + { + "bbox": [ + 313, + 262, + 499, + 276 + ], + "type": "text", + "content": "4.2. Self-Supervised Pre-training Tasks" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 280, + 555, + 400 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 280, + 555, + 400 + ], + "spans": [ + { + "bbox": [ + 313, + 280, + 555, + 400 + ], + "type": "text", + "content": "Figure 4a and 4b show our pre-taining tasks in VDocRetriever. The goal of pre-training is to transfer the powerful understanding and generation abilities of LVLMs to facilitate their usage in visual document retrieval. To this end, we propose two new self-supervised pre-training tasks to compress the entire image representation into the token at the end of the input image. Our pre-training process passes the document image, and its extracted OCR text is used as a pseudo target. Full pre-training objectives is defined as " + }, + { + "bbox": [ + 313, + 280, + 555, + 400 + ], + "type": "inline_equation", + "content": "\\mathcal{L} = \\mathcal{L}_{\\mathrm{RCR}} + \\mathcal{L}_{\\mathrm{RCG}}" + }, + { + "bbox": [ + 313, + 280, + 555, + 400 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 415, + 554, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 415, + 554, + 511 + ], + "spans": [ + { + "bbox": [ + 313, + 415, + 554, + 511 + ], + "type": "text", + "content": "Representation Compression via Retrieval (RCR). We compress image representations with a contrastive learning task that retrieves images relevant to their corresponding OCR text, by leveraging LVLM's image understanding capabilities. As shown in Figure 4a, we first construct positive OCR text-image pairs " + }, + { + "bbox": [ + 313, + 415, + 554, + 511 + ], + "type": "inline_equation", + "content": "(\\mathbf{h}_0,\\mathbf{h}_{\\mathrm{d}^+})" + }, + { + "bbox": [ + 313, + 415, + 554, + 511 + ], + "type": "text", + "content": " from raw unlabeled document images. 
Then, we adopt in-batch negatives to calculate the contrastive loss by InfoNCE [44] as follows:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 347, + 519, + 553, + 547 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 347, + 519, + 553, + 547 + ], + "spans": [ + { + "bbox": [ + 347, + 519, + 553, + 547 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {R C R}} = - \\log \\frac {\\exp \\left(\\operatorname {S I M} \\left(\\mathbf {h} _ {\\mathrm {o}}, \\mathbf {h} _ {\\mathrm {d} ^ {+}}\\right) / \\tau\\right)}{\\sum_ {i \\in \\mathcal {B}} \\exp \\left(\\operatorname {S I M} \\left(\\mathbf {h} _ {\\mathrm {o}}, \\mathbf {h} _ {\\mathrm {d} _ {i}}\\right) / \\tau\\right)}, \\tag {1}", + "image_path": "113d666e0d4830fb90f365048e0bf49664e7fea3e1aae273b56278576ea5f2f6.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 555, + 553, + 578 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 555, + 553, + 578 + ], + "spans": [ + { + "bbox": [ + 313, + 555, + 553, + 578 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 313, + 555, + 553, + 578 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 555, + 553, + 578 + ], + "type": "text", + "content": " is a temperature hyperparameter to scale the logits, and " + }, + { + "bbox": [ + 313, + 555, + 553, + 578 + ], + "type": "inline_equation", + "content": "\\mathcal{B}" + }, + { + "bbox": [ + 313, + 555, + 553, + 578 + ], + "type": "text", + "content": " represents the batch size." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 594, + 555, + 714 + ], + "type": "text", + "content": "Representation Compression via Generation (RCG). We propose a representation training strategy that leverages the generative capabilities of LVLMs through a customized attention mask matrix. As depicted in Figure 4b, representations for the image tokens, including the token, are obtained via a standard auto-regressive process. In contrast, for the subsequent L OCR token representations, we mask the image token representations and allow only the attention of token and the preceding OCR tokens. 
This approach facilitates pooling the image representations" + } + ] + } + ], + "index": 14 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 70, + 79, + 83, + 92 + ], + "blocks": [ + { + "bbox": [ + 70, + 79, + 83, + 92 + ], + "lines": [ + { + "bbox": [ + 70, + 79, + 83, + 92 + ], + "spans": [ + { + "bbox": [ + 70, + 79, + 83, + 92 + ], + "type": "image", + "image_path": "dafe7f71f690b5d09e728783c8143871a4e73e05a3f58810e902020a4d0ef7af.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 83, + 83, + 108, + 90 + ], + "lines": [ + { + "bbox": [ + 83, + 83, + 108, + 90 + ], + "spans": [ + { + "bbox": [ + 83, + 83, + 108, + 90 + ], + "type": "text", + "content": "Trainable" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 114, + 83, + 143, + 90 + ], + "blocks": [ + { + "bbox": [ + 114, + 83, + 143, + 90 + ], + "lines": [ + { + "bbox": [ + 114, + 83, + 143, + 90 + ], + "spans": [ + { + "bbox": [ + 114, + 83, + 143, + 90 + ], + "type": "image", + "image_path": "5f6a79669dc7ecbbfe97764c217ad7216d08cf0faab4ea84eb343c07e9058ad6.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 66, + 103, + 218, + 217 + ], + "blocks": [ + { + "bbox": [ + 66, + 103, + 218, + 217 + ], + "lines": [ + { + "bbox": [ + 66, + 103, + 218, + 217 + ], + "spans": [ + { + "bbox": [ + 66, + 103, + 218, + 217 + ], + "type": "image", + "image_path": "9f5bf16af03ad103a698c0321a9b58c638532434e40ee551f0fd81b372907c97.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 221, + 219, + 232 + ], + "lines": [ + { + "bbox": [ + 61, + 221, + 219, + 232 + ], + "spans": [ + { + "bbox": [ + 61, + 221, + 219, + 232 + ], + "type": "text", + "content": "(a) Representation Compression via Retrieval (RCR)" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 228, + 73, + 397, + 218 + ], + "blocks": [ + { + "bbox": [ + 182, + 76, + 274, + 86 + ], + "lines": [ + { + "bbox": [ + 182, + 76, + 274, + 86 + ], + "spans": [ + { + "bbox": [ + 182, + 76, + 274, + 86 + ], + "type": "text", + "content": "Self-Supervised Pre-training" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 228, + 73, + 397, + 218 + ], + "lines": [ + { + "bbox": [ + 228, + 73, + 397, + 218 + ], + "spans": [ + { + "bbox": [ + 228, + 73, + 397, + 218 + ], + "type": "image", + "image_path": "9ba945bae9646b0a77bd959c1908abe2702a7b534271c0c4831d92704d0e3d4d.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 229, + 221, + 394, + 232 + ], + "lines": [ + { + "bbox": [ + 229, + 221, + 394, + 232 + ], + "spans": [ + { + "bbox": [ + 229, + 221, + 394, + 232 + ], + "type": "text", + "content": "(b) Representation Compression via Generation (RCG)" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 401, + 103, + 550, + 219 + ], + "blocks": [ + { + "bbox": [ + 436, + 76, + 512, + 87 + ], + "lines": [ + { + "bbox": [ + 436, + 76, + 512, + 87 + ], + "spans": [ + { + "bbox": [ + 436, + 76, + 512, + 87 + ], + "type": "text", + "content": "Supervised Fine-tuning" + } + ] + } + ], + "index": 8, 
+ "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 401, + 103, + 550, + 219 + ], + "lines": [ + { + "bbox": [ + 401, + 103, + 550, + 219 + ], + "spans": [ + { + "bbox": [ + 401, + 103, + 550, + 219 + ], + "type": "image", + "image_path": "fb412815b242fc2172d8d550dc1d382145390815e4d89360f0f95a5919c3c6c0.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 430, + 221, + 523, + 231 + ], + "lines": [ + { + "bbox": [ + 430, + 221, + 523, + 231 + ], + "spans": [ + { + "bbox": [ + 430, + 221, + 523, + 231 + ], + "type": "text", + "content": "(c) Visual Document Retrieval" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 244, + 555, + 268 + ], + "lines": [ + { + "bbox": [ + 55, + 244, + 555, + 268 + ], + "spans": [ + { + "bbox": [ + 55, + 244, + 555, + 268 + ], + "type": "text", + "content": "Figure 4. Our pre-training tasks using unlabeled documents and fine-tuning in VDocRetriever. The RCR task retrieves relevant images given corresponding OCR tokens, and the RCG task outputs OCR tokens by paying attention to only the token." + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_caption" + } + ], + "index": 9 + }, + { + "type": "table", + "bbox": [ + 58, + 285, + 294, + 396 + ], + "blocks": [ + { + "bbox": [ + 58, + 285, + 294, + 396 + ], + "lines": [ + { + "bbox": [ + 58, + 285, + 294, + 396 + ], + "spans": [ + { + "bbox": [ + 58, + 285, + 294, + 396 + ], + "type": "table", + "html": "
<table>
<tr><td>Dataset</td><td>Documents</td><td>%Filtered</td><td>#Images</td><td>#Train&Dev</td><td>#Test</td></tr>
<tr><td>DocVQA [42]</td><td>Industry</td><td>84.8</td><td>12,767</td><td>6,382</td><td>-</td></tr>
<tr><td>InfoVQA [43]</td><td>Infographic</td><td>61.2</td><td>5,485</td><td>9,592</td><td>1,048</td></tr>
<tr><td>VisualMRC [56]</td><td>Webpage</td><td>71.9</td><td>10,229</td><td>6,126</td><td>-</td></tr>
<tr><td>ChartQA [41]</td><td>Chart</td><td>94.0</td><td>20,882</td><td>-</td><td>150</td></tr>
<tr><td>OpenWikiTable [27]</td><td>Table</td><td>0.0</td><td>1,257</td><td>4,261</td><td>-</td></tr>
<tr><td>DUDE [28]</td><td>Open</td><td>92.3</td><td>27,955</td><td>2,135</td><td>496</td></tr>
<tr><td>MPMQA [68]</td><td>Manual</td><td>81.7</td><td>10,018</td><td>3,054</td><td>-</td></tr>
<tr><td>SlideVQA [57]§</td><td>Slide</td><td>66.7</td><td>52,380</td><td>-</td><td>760</td></tr>
<tr><td>MHDocVQA§</td><td>Open</td><td>9.5</td><td>28,550</td><td>9,470</td><td>-</td></tr>
</table>
", + "image_path": "1a194f91f9eb8affa85635e923392a2b9e31bb226c9ea35d790ff450f915ec84.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 404, + 296, + 439 + ], + "lines": [ + { + "bbox": [ + 55, + 404, + 296, + 439 + ], + "spans": [ + { + "bbox": [ + 55, + 404, + 296, + 439 + ], + "type": "text", + "content": "Table 2. Datasets in OpenDocVQA. " + }, + { + "bbox": [ + 55, + 404, + 296, + 439 + ], + "type": "inline_equation", + "content": "\\S" + }, + { + "bbox": [ + 55, + 404, + 296, + 439 + ], + "type": "text", + "content": " denotes datasets requiring multi-hop reasoning. Note that MHDocVQA was created using only the training datasets." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 55, + 456, + 261, + 469 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 456, + 261, + 469 + ], + "spans": [ + { + "bbox": [ + 55, + 456, + 261, + 469 + ], + "type": "text", + "content": "into " + }, + { + "bbox": [ + 55, + 456, + 261, + 469 + ], + "type": "inline_equation", + "content": "<\\mathsf{EOS}>" + }, + { + "bbox": [ + 55, + 456, + 261, + 469 + ], + "type": "text", + "content": " token. The loss function is defined as:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 94, + 473, + 295, + 506 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 94, + 473, + 295, + 506 + ], + "spans": [ + { + "bbox": [ + 94, + 473, + 295, + 506 + ], + "type": "interline_equation", + "content": "\\mathcal {L} _ {\\mathrm {R C G}} = - \\frac {1}{L} \\sum_ {i = 1} ^ {L} \\log p \\left(y _ {i} \\mid y _ {< i}, < \\mathrm {E O S} >\\right), \\tag {2}", + "image_path": "1f97e6f0def4cec4b8ceb886743b573b37cc841d1acb17b683d13dbe3cf287ab.jpg" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 511, + 234, + 523 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 511, + 234, + 523 + ], + "spans": [ + { + "bbox": [ + 55, + 511, + 234, + 523 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 55, + 511, + 234, + 523 + ], + "type": "inline_equation", + "content": "y_{i}" + }, + { + "bbox": [ + 55, + 511, + 234, + 523 + ], + "type": "text", + "content": " denotes the " + }, + { + "bbox": [ + 55, + 511, + 234, + 523 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 55, + 511, + 234, + 523 + ], + "type": "text", + "content": "-th token of the OCR." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 55, + 529, + 188, + 542 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 529, + 188, + 542 + ], + "spans": [ + { + "bbox": [ + 55, + 529, + 188, + 542 + ], + "type": "text", + "content": "4.3. Supervised Fine-tuning" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 55, + 547, + 296, + 620 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 547, + 296, + 620 + ], + "spans": [ + { + "bbox": [ + 55, + 547, + 296, + 620 + ], + "type": "text", + "content": "We first fine-tune the VDocRetriever with the contrastive learning objective using query-document pairs with in-batch negatives (see Figure 4c). Then, we apply the trained VDocRetriever to search over the corpus " + }, + { + "bbox": [ + 55, + 547, + 296, + 620 + ], + "type": "inline_equation", + "content": "\\mathcal{I}" + }, + { + "bbox": [ + 55, + 547, + 296, + 620 + ], + "type": "text", + "content": " to feed the top-k documents into the VDocGenerator. 
Finally, we train the VDocGenerator using the next-token prediction objective." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 55, + 628, + 137, + 642 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 628, + 137, + 642 + ], + "spans": [ + { + "bbox": [ + 55, + 628, + 137, + 642 + ], + "type": "text", + "content": "5. Experiments" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 55, + 647, + 171, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 647, + 171, + 661 + ], + "spans": [ + { + "bbox": [ + 55, + 647, + 171, + 661 + ], + "type": "text", + "content": "5.1. Experimental Setup" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 713 + ], + "type": "text", + "content": "Pre-training dataset. For pre-training, we gathered 500k samples containing document image and OCR text pairs filtered from the DocStruct4M [20]. We excluded any images that appeared in the test set to avoid data contamination." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 313, + 288, + 555, + 396 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 288, + 555, + 396 + ], + "spans": [ + { + "bbox": [ + 313, + 288, + 555, + 396 + ], + "type": "text", + "content": "Fine-tuning and evaluation datasets. We evaluated our models in both zero-shot and supervised settings. The zero-shot evaluation assessed the models' generalization capabilities on unseen datasets, while the supervised evaluation measured performance when training samples were available. As shown in Table 2, we trained our models on seven datasets and evaluated them on four datasets, including ChartQA and SlideVQA in the zero-shot setting, and InfoVQA and DUDE in the supervised setting." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 411, + 556, + 555 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 411, + 556, + 555 + ], + "spans": [ + { + "bbox": [ + 313, + 411, + 556, + 555 + ], + "type": "text", + "content": "Implementation details. We initialized VDocRAG with Phi3V [1], a state-of-the-art LVLM trained on high-resolution images and multi-image data. The parameters of VDocRetriever and VDocGenerator were not shared. We employed LoRA [21] with LLM while keeping other parameters frozen during training. We trained VDocRAG for one epoch on eight A100-80G GPUs with AdamW [36] optimizer and FlashAttention [11], using batch sizes of 16 for pre-training and 64 for fine-tuning. We set the temperature " + }, + { + "bbox": [ + 313, + 411, + 556, + 555 + ], + "type": "inline_equation", + "content": "\\tau" + }, + { + "bbox": [ + 313, + 411, + 556, + 555 + ], + "type": "text", + "content": " to 0.01. We applied Tesseract [54] to extract OCR text in images. By default, we used the top three documents obtained from VDocRetirver." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 570, + 556, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 570, + 556, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 570, + 556, + 714 + ], + "type": "text", + "content": "Retrieval baselines. We compared VDocRetriever with two categories of retrievers. The first category includes off-the-shelf text retrieval models on extracted text and image retrieval models. 
These consist of BM25 [52], a lexical matching model; Contriever [22], E5 [59], and GTE [34], which are popular strong text embedding models based on BERT [12]; E5-Mistral [60] and NV-Embed-v2 [30], which are state-of-the-art LLM-based embedding models; CLIP [47], a dual-encoder vision-language model; DSE [37] and VisRAG-Ret [66], which are state-of-the-art visual document retrieval models. The second category includes fine-tuned models trained on OpenDocVQA. To" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 59, + 70, + 552, + 237 + ], + "blocks": [ + { + "bbox": [ + 59, + 70, + 552, + 237 + ], + "lines": [ + { + "bbox": [ + 59, + 70, + 552, + 237 + ], + "spans": [ + { + "bbox": [ + 59, + 70, + 552, + 237 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Model</td><td rowspan=2>Init</td><td rowspan=2>Docs</td><td rowspan=2>Scale</td><td rowspan=2>#PT</td><td rowspan=2>#FT</td><td colspan=2>ChartQA</td><td colspan=2>SlideVQA</td><td colspan=2>InfoVQA</td><td colspan=2>DUDE</td></tr>
<tr><td>Single</td><td>All</td><td>Single</td><td>All</td><td>Single</td><td>All</td><td>Single</td><td>All</td></tr>
<tr><td colspan=14>Off-the-shelf</td></tr>
<tr><td>BM25 [52]</td><td>-</td><td>Text</td><td>0</td><td>0</td><td>0</td><td>54.8</td><td>15.6</td><td>40.7</td><td>38.7</td><td>50.2</td><td>31.3</td><td>57.2</td><td>47.5</td></tr>
<tr><td>Contriever [22]</td><td>BERT [12]</td><td>Text</td><td>110M</td><td>1B</td><td>500K</td><td>66.9</td><td>59.3</td><td>50.8</td><td>46.5</td><td>42.5</td><td>21.0</td><td>40.6</td><td>29.7</td></tr>
<tr><td>E5 [59]</td><td>BERT [12]</td><td>Text</td><td>110M</td><td>270M</td><td>1M</td><td>74.9</td><td>66.3</td><td>53.6</td><td>49.6</td><td>49.2</td><td>26.9</td><td>45.0</td><td>38.9</td></tr>
<tr><td>GTE [34]</td><td>BERT [12]</td><td>Text</td><td>110M</td><td>788M</td><td>3M</td><td>72.8</td><td>64.7</td><td>55.4</td><td>49.1</td><td>51.3</td><td>32.5</td><td>42.4</td><td>36.0</td></tr>
<tr><td>E5-Mistral [60]</td><td>Mistral [23]</td><td>Text</td><td>7.1B</td><td>0</td><td>1.85M</td><td>72.3</td><td>70.0</td><td>63.8</td><td>57.6</td><td>60.3</td><td>33.9</td><td>52.2</td><td>45.2</td></tr>
<tr><td>NV-Embed-v2 [30]</td><td>Mistral [23]</td><td>Text</td><td>7.9B</td><td>0</td><td>2.46M</td><td>75.3</td><td>70.7</td><td>61.7</td><td>58.1</td><td>56.5</td><td>34.2</td><td>43.0</td><td>38.6</td></tr>
<tr><td>CLIP [47]</td><td>Scratch</td><td>Image</td><td>428M</td><td>400M</td><td>0</td><td>54.6</td><td>38.6</td><td>38.1</td><td>29.7</td><td>45.3</td><td>20.6</td><td>23.2</td><td>17.6</td></tr>
<tr><td>DSE [37]</td><td>Phi3V [1]</td><td>Image</td><td>4.2B</td><td>0</td><td>5.61M</td><td>72.7</td><td>68.5</td><td>73.0</td><td>67.2</td><td>67.4</td><td>49.6</td><td>55.5</td><td>47.7</td></tr>
<tr><td>VisRAG-Ret [66]</td><td>MiniCPM-V [63]</td><td>Image</td><td>3.4B</td><td>0</td><td>240K</td><td>87.2*</td><td>75.5*</td><td>74.3*</td><td>68.4*</td><td>71.9*</td><td>51.7*</td><td>56.4</td><td>44.5</td></tr>
<tr><td colspan=14>Trained on OpenDocVQA</td></tr>
<tr><td>Phi3 [1]</td><td>Phi3V [1]</td><td>Text</td><td>4B</td><td>0</td><td>41K</td><td>72.5</td><td>65.3</td><td>53.3</td><td>48.4</td><td>53.2*</td><td>33.0*</td><td>40.5*</td><td>32.0*</td></tr>
<tr><td>VDocRetriever†</td><td>Phi3V [1]</td><td>Image</td><td>4.2B</td><td>0</td><td>41K</td><td>84.2 (+11.7)</td><td>74.8 (+9.5)</td><td>71.0 (+17.7)</td><td>65.1 (+16.7)</td><td>66.8* (+13.6)</td><td>52.8* (+19.8)</td><td>48.4* (+7.9)</td><td>41.0* (+9.0)</td></tr>
<tr><td>VDocRetriever</td><td>Phi3V [1]</td><td>Image</td><td>4.2B</td><td>500K</td><td>41K</td><td>86.0 (+1.8)</td><td>76.4 (+1.6)</td><td>77.3 (+6.3)</td><td>73.3 (+8.2)</td><td>72.9* (+6.1)</td><td>55.5* (+2.7)</td><td>57.7* (+9.3)</td><td>50.9* (+9.9)</td></tr>
</table>
", + "image_path": "d3a6583b9ab0f54d816ebe6f818bb357823c7fc0b4fbdeaae0b828e9de90fee4.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 89, + 289, + 523, + 404 + ], + "blocks": [ + { + "bbox": [ + 55, + 245, + 555, + 281 + ], + "lines": [ + { + "bbox": [ + 55, + 245, + 555, + 281 + ], + "spans": [ + { + "bbox": [ + 55, + 245, + 555, + 281 + ], + "type": "text", + "content": "Table 3. Retrieval results under the single- (Single) and all-pool (All) settings. * indicates performance on test data for which corresponding training samples are available. All other results represent zero-shot performance. Init, FT, and PT denote the initialization model, finetuning, and pre-training, respectively. Performance gains in green and blue are compared to the base LLM and VDocRetirver†, respectively." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 89, + 289, + 523, + 404 + ], + "lines": [ + { + "bbox": [ + 89, + 289, + 523, + 404 + ], + "spans": [ + { + "bbox": [ + 89, + 289, + 523, + 404 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Generator</td><td rowspan=2>Retriever</td><td rowspan=2>Docs</td><td colspan=2>ChartQA</td><td colspan=2>SlideVQA</td><td colspan=2>InfoVQA</td><td colspan=2>DUDE</td></tr>
<tr><td>Single</td><td>All</td><td>Single</td><td>All</td><td>Single</td><td>All</td><td>Single</td><td>All</td></tr>
<tr><td colspan=11>Closed-book</td></tr>
<tr><td>Phi3</td><td>-</td><td>-</td><td>20.0</td><td>20.0</td><td>20.3</td><td>20.3</td><td>34.9*</td><td>34.9*</td><td>23.1*</td><td>23.1*</td></tr>
<tr><td colspan=11>Text-based RAG</td></tr>
<tr><td>Phi3</td><td>Phi3</td><td>Text</td><td>28.0</td><td>28.0</td><td>28.6</td><td>28.0</td><td>40.5*</td><td>39.1*</td><td>40.1*</td><td>35.7*</td></tr>
<tr><td>Phi3</td><td>Gold</td><td>Text</td><td>36.6</td><td>36.6</td><td>27.8</td><td>27.8</td><td>45.6*</td><td>45.6*</td><td>55.9*</td><td>55.9*</td></tr>
<tr><td colspan=11>VDocRAG (Ours)</td></tr>
<tr><td>VDocGenerator</td><td>VDocRetriever</td><td>Image</td><td>52.0 (+24.0)</td><td>48.0 (+20.0)</td><td>44.2 (+15.6)</td><td>42.0 (+14.0)</td><td>56.2* (+15.7)</td><td>49.2* (+10.1)</td><td>48.5* (+8.4)</td><td>44.0* (+8.3)</td></tr>
<tr><td>VDocGenerator</td><td>Gold</td><td>Image</td><td>74.0</td><td>74.0</td><td>56.4</td><td>56.4</td><td>64.6*</td><td>64.6*</td><td>66.4*</td><td>66.4*</td></tr>
</table>
", + "image_path": "54efb87ac6dad28683019a443b5e882e7f23fa45ba4fe6d62ae166f575a26382.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 54, + 413, + 555, + 448 + ], + "lines": [ + { + "bbox": [ + 54, + 413, + 555, + 448 + ], + "spans": [ + { + "bbox": [ + 54, + 413, + 555, + 448 + ], + "type": "text", + "content": "Table 4. DocumentVQA results. All models are fine-tuned on OpenDocVQA. The results marked with * denote performance on unseen test samples, and the other results represent zero-shot performance. The performance gain in green is compared to the text-based RAG that has the same base LLM. Gold knows the ground-truth documents. Models answer the question based on the top three retrieval results." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 54, + 467, + 294, + 528 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 467, + 294, + 528 + ], + "spans": [ + { + "bbox": [ + 54, + 467, + 294, + 528 + ], + "type": "text", + "content": "verify the effectiveness of encoding documents through images, we fine-tuned the LLM in VDocRetriever (Phi3 [1]) using extracted text to represent documents. Additionally, we included a variant of VDocRetriever without pretraining (VDocRetriever†)." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 54, + 542, + 295, + 639 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 542, + 295, + 639 + ], + "spans": [ + { + "bbox": [ + 54, + 542, + 295, + 639 + ], + "type": "text", + "content": "QA baselines. We compared VDocRAG against closed-book and text-based RAG models. These baselines used the same model initialization as VDocRAG but fine-tuned only the LLM (Phi3). The closed-book model received only the question as input, while the text-based RAG used the top three documents retrieved by the Phi3 retriever. Moreover, we assessed possible upper-bound performance by testing generation with ground-truth (Gold) documents." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 54, + 653, + 295, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 54, + 653, + 295, + 715 + ], + "spans": [ + { + "bbox": [ + 54, + 653, + 295, + 715 + ], + "type": "text", + "content": "Evaluation metrics. We evaluated retrieval performance using nDCG@5, a widely used metric in information retrieval [17, 25]. For the DocumentVQA task, we followed the evaluation protocol of each dataset, we used ANLS [4] for InfoVQA and DUDE, Relaxed Accuracy [41] for" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 313, + 467, + 517, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 467, + 517, + 480 + ], + "spans": [ + { + "bbox": [ + 313, + 467, + 517, + 480 + ], + "type": "text", + "content": "ChartQA, F1 for SlideVQA as evaluation metrics." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 488, + 417, + 500 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 488, + 417, + 500 + ], + "spans": [ + { + "bbox": [ + 313, + 488, + 417, + 500 + ], + "type": "text", + "content": "5.2. 
Retrieval Results" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 312, + 506, + 555, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 312, + 506, + 555, + 663 + ], + "spans": [ + { + "bbox": [ + 312, + 506, + 555, + 663 + ], + "type": "text", + "content": "Table 3 shows that VDocRetriever† achieved significantly higher retrieval performance than the text-based Phi3 retriever on all datasets under the same conditions. This indicates that our model can effectively encode documents in image format for retrieval tasks. Furthermore, VDocRetriever exhibits superior zero-shot generalization on unseen datasets, ChartQA and SlideVQA, outperforming both off-the-shelf text retrievers and state-of-the-art visual document retrieval models. Notably, DSE was initialized with the same LVLM as ours and fine-tuned on 13.7 times more data. This highlights that our pre-training strategy and the OpenDocVQA dataset offer unique advantages that are not adequately addressed by existing approaches." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 671, + 531, + 685 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 671, + 531, + 685 + ], + "spans": [ + { + "bbox": [ + 313, + 671, + 531, + 685 + ], + "type": "text", + "content": "5.3. Retrieval-Augmented Generation Results" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 713 + ], + "type": "text", + "content": "Table 4 shows that VDocRAG significantly outperformed both the closed-book LLM and the text-based RAG on" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 58, + 70, + 294, + 146 + ], + "blocks": [ + { + "bbox": [ + 58, + 70, + 294, + 146 + ], + "lines": [ + { + "bbox": [ + 58, + 70, + 294, + 146 + ], + "spans": [ + { + "bbox": [ + 58, + 70, + 294, + 146 + ], + "type": "table", + "html": "
<table>
<tr><td>Model</td><td>SlideVQA</td><td>InfoVQA</td></tr>
<tr><td>VDocRetriever</td><td>77.3</td><td>72.9</td></tr>
<tr><td>w/o RCR</td><td>75.9 (-1.4)</td><td>71.1 (-1.8)</td></tr>
<tr><td>w/o RCG</td><td>71.7 (-5.6)</td><td>68.8 (-4.1)</td></tr>
<tr><td>w/o RCG & RCR</td><td>71.0 (-6.3)</td><td>66.8 (-6.1)</td></tr>
<tr><td>w/o LLM & Projector (→CLIP encoders)</td><td>43.7 (-33.6)</td><td>37.9 (-35.0)</td></tr>
</table>
", + "image_path": "daa3016ce5d5e7782cea565c8faa26b1c6efa12759c7e13273fa9d8ffc3e6863.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 58, + 188, + 298, + 251 + ], + "blocks": [ + { + "bbox": [ + 55, + 154, + 295, + 177 + ], + "lines": [ + { + "bbox": [ + 55, + 154, + 295, + 177 + ], + "spans": [ + { + "bbox": [ + 55, + 154, + 295, + 177 + ], + "type": "text", + "content": "Table 5. Ablation study of our pre-training tasks and model architecture in the retrieval task under the single-pool setting." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 58, + 188, + 298, + 251 + ], + "lines": [ + { + "bbox": [ + 58, + 188, + 298, + 251 + ], + "spans": [ + { + "bbox": [ + 58, + 188, + 298, + 251 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Model</td><td colspan=2>Retrieval</td><td colspan=2>QA</td></tr>
<tr><td>SlideVQA</td><td>InfoVQA</td><td>SlideVQA</td><td>InfoVQA</td></tr>
<tr><td>VDocRAG</td><td>77.3</td><td>72.9</td><td>44.2</td><td>56.2</td></tr>
<tr><td>w/o MHDocVQA</td><td>75.0 (-2.3)</td><td>71.4 (-1.5)</td><td>43.4 (-0.8)</td><td>53.8 (-2.4)</td></tr>
<tr><td>w/o except MHDocVQA</td><td>68.8 (-8.5)</td><td>61.7 (-11.2)</td><td>41.1 (-3.1)</td><td>44.0 (-12.2)</td></tr>
</table>
", + "image_path": "4e61e1c195bf6000753a2dd640be8401758f0293e8d421f6f33ad6103d78a7f0.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 56, + 293, + 176, + 396 + ], + "blocks": [ + { + "bbox": [ + 56, + 293, + 176, + 396 + ], + "lines": [ + { + "bbox": [ + 56, + 293, + 176, + 396 + ], + "spans": [ + { + "bbox": [ + 56, + 293, + 176, + 396 + ], + "type": "image", + "image_path": "73871c05f45431f46c97eae932f29b3326fe511a2575664d872318fe1abcd46e.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 75, + 401, + 165, + 412 + ], + "lines": [ + { + "bbox": [ + 75, + 401, + 165, + 412 + ], + "spans": [ + { + "bbox": [ + 75, + 401, + 165, + 412 + ], + "type": "text", + "content": "(a) Retrieval performance" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 55, + 425, + 296, + 449 + ], + "lines": [ + { + "bbox": [ + 55, + 425, + 296, + 449 + ], + "spans": [ + { + "bbox": [ + 55, + 425, + 296, + 449 + ], + "type": "text", + "content": "Figure 5. Performance under different document lengths on InfoVQA (single-pool setting)." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 179, + 294, + 296, + 396 + ], + "blocks": [ + { + "bbox": [ + 179, + 294, + 296, + 396 + ], + "lines": [ + { + "bbox": [ + 179, + 294, + 296, + 396 + ], + "spans": [ + { + "bbox": [ + 179, + 294, + 296, + 396 + ], + "type": "image", + "image_path": "df449c705825c0ab0b8705e5bf0f02771596144cbceb6c4291677a1ba17ed475.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 202, + 401, + 272, + 411 + ], + "lines": [ + { + "bbox": [ + 202, + 401, + 272, + 411 + ], + "spans": [ + { + "bbox": [ + 202, + 401, + 272, + 411 + ], + "type": "text", + "content": "(b) QA performance" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 470, + 296, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 470, + 296, + 602 + ], + "spans": [ + { + "bbox": [ + 55, + 470, + 296, + 602 + ], + "type": "text", + "content": "the DocumentVQA task, even when all models were the same initialization. Additionally, when the retrieval results were fixed to ground-truth (Gold) documents, VDocRAG demonstrated superior performance to text-based RAG. This underscores the importance of visual cues in extracting answers from documents and suggests that VDocGenerator has a higher upper-bound performance. Both text-based RAG and VDocRAG exhibited substantial improvements when provided with ground-truth documents, highlighting potential areas for enhancing retrieval accuracy and improving the generator's robustness to retrieval noise." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 611, + 118, + 624 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 611, + 118, + 624 + ], + "spans": [ + { + "bbox": [ + 55, + 611, + 118, + 624 + ], + "type": "text", + "content": "5.4. Analysis" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 629, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 629, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 629, + 296, + 715 + ], + "type": "text", + "content": "Can our pre-training tasks be beneficial? 
Table 5 shows that VDocRetriever outperformed the model without pretraining. Removing each pre-training task or both RCG and RCR tasks decreased performance, indicating that both tasks contribute complementarily. These validate that our pre-training effectively learns to compress image features while aligning them with textual contents in images." + } + ] + } + ], + "index": 11 + }, + { + "type": "table", + "bbox": [ + 331, + 70, + 540, + 125 + ], + "blocks": [ + { + "bbox": [ + 55, + 259, + 296, + 282 + ], + "lines": [ + { + "bbox": [ + 55, + 259, + 296, + 282 + ], + "spans": [ + { + "bbox": [ + 55, + 259, + 296, + 282 + ], + "type": "text", + "content": "Table 6. Ablation study of our dataset in retrieval and QA tasks under the single-pool setting." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 331, + 70, + 540, + 125 + ], + "lines": [ + { + "bbox": [ + 331, + 70, + 540, + 125 + ], + "spans": [ + { + "bbox": [ + 331, + 70, + 540, + 125 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Model</td><td colspan=2>Retrieval</td><td>QA</td><td rowspan=2>Total</td></tr>
<tr><td>OCR</td><td>Encoding</td><td>Generation</td></tr>
<tr><td>Text-based RAG<sub>Phi3</sub></td><td>590.0</td><td>70.7</td><td>422.7</td><td>1083.4</td></tr>
<tr><td>VDocRAG</td><td>-</td><td>204.4</td><td>789.7</td><td>994.1</td></tr>
</table>
", + "image_path": "d4e697531784a0bdf0197a13e8f684df42e426dd4eef7cbab43178197d461901.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "table_body" + } + ], + "index": 12 + }, + { + "type": "table", + "bbox": [ + 317, + 178, + 553, + 243 + ], + "blocks": [ + { + "bbox": [ + 313, + 133, + 555, + 166 + ], + "lines": [ + { + "bbox": [ + 313, + 133, + 555, + 166 + ], + "spans": [ + { + "bbox": [ + 313, + 133, + 555, + 166 + ], + "type": "text", + "content": "Table 7. Efficiency analysis on InfoVQA. The average time (ms) to encode a single document or generate a single answer is measured on a single A100 GPU." + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 178, + 553, + 243 + ], + "lines": [ + { + "bbox": [ + 317, + 178, + 553, + 243 + ], + "spans": [ + { + "bbox": [ + 317, + 178, + 553, + 243 + ], + "type": "table", + "html": "
<table>
<tr><td rowspan=2>Model</td><td colspan=2>Retrieval</td><td colspan=2>QA</td></tr>
<tr><td>SlideVQA</td><td>InfoVQA</td><td>SlideVQA</td><td>InfoVQA</td></tr>
<tr><td>Text-based RAG<sub>Llama3</sub></td><td>60.1</td><td>61.8</td><td>37.8</td><td>49.5</td></tr>
<tr><td>VDocRAG<sub>Idefics3</sub></td><td>73.4</td><td>72.5</td><td>48.9</td><td>59.9</td></tr>
<tr><td>w/o Pre-train</td><td>70.3</td><td>69.8</td><td>47.2</td><td>59.6</td></tr>
</table>
", + "image_path": "cb5bfec95a153aac6c77d40d97d65aefdbbaadf610a541398d6e0b294385ba9e.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "table_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 251, + 555, + 274 + ], + "lines": [ + { + "bbox": [ + 313, + 251, + 555, + 274 + ], + "spans": [ + { + "bbox": [ + 313, + 251, + 555, + 274 + ], + "type": "text", + "content": "Table 8. Analysis with different LVLM (Idefics3) in retrieval and QA tasks under the single-pool setting." + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 313, + 296, + 555, + 369 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 296, + 555, + 369 + ], + "spans": [ + { + "bbox": [ + 313, + 296, + 555, + 369 + ], + "type": "text", + "content": "Does LLM help understanding document images? Table 5 shows that retrieval performance dropped substantially when the LLM block was removed, leaving only the CLIP text/vision encoder, even with the same visual transformer backbone. This suggests that LLM can capture finer-grained visual details and enhance semantic understanding." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 387, + 556, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 387, + 556, + 495 + ], + "spans": [ + { + "bbox": [ + 313, + 387, + 556, + 495 + ], + "type": "text", + "content": "Does our dataset improve the performance? Table 6 shows that removing MHDocVQA caused a performance decrease, indicating that MHDocVQA requires distinct reasoning skills compared to other collected datasets in OpenDocVQA. Additionally, excluding all OpenDocVQA datasets except MHDocVQA led to a significant performance drop. This confirms that our collected datasets effectively supplement the missing capabilities of LVLM in document retrieval and understanding." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 514, + 556, + 611 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 514, + 556, + 611 + ], + "spans": [ + { + "bbox": [ + 313, + 514, + 556, + 611 + ], + "type": "text", + "content": "How well does VDocRAG perform under different document lengths? Figure 5 shows that VDocRAG consistently outperforms text-based RAG, indicating that VDocRAG can better understand documents through visual information. In general, we observed that the VDocRAG's relative performance over text-based RAG is larger for images with 0-10 words (+66.0 in retrieval, +21.1 in QA) than for those with 500+ words (+28.4 in retrieval, +16.7 in QA)." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "type": "text", + "content": "Is VDocRAG more efficient than text-based RAG? Table 7 shows that VDocRAG is more efficient than text-based RAG. Especially, VDocRAG requires " + }, + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "type": "inline_equation", + "content": "69\\%" + }, + { + "bbox": [ + 313, + 629, + 556, + 715 + ], + "type": "text", + "content": " less inference time to retrieve documents than text-based RAG. Although VDocRetriever takes more time for document encoding and generation, it eliminates the time-consuming OCR processing necessary for text-based RAG." 
+ } + ] + } + ], + "index": 19 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 58, + 71, + 553, + 252 + ], + "blocks": [ + { + "bbox": [ + 58, + 71, + 553, + 252 + ], + "lines": [ + { + "bbox": [ + 58, + 71, + 553, + 252 + ], + "spans": [ + { + "bbox": [ + 58, + 71, + 553, + 252 + ], + "type": "image", + "image_path": "e6b5f824fb272dd5a3145c39a1ecf37d59a1fd8717888828b2d0f0e211e90863.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 172, + 259, + 437, + 271 + ], + "lines": [ + { + "bbox": [ + 172, + 259, + 437, + 271 + ], + "spans": [ + { + "bbox": [ + 172, + 259, + 437, + 271 + ], + "type": "text", + "content": "Figure 6. Qualitative results of VDocRAG compared to text-based RAG." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 62, + 290, + 164, + 386 + ], + "blocks": [ + { + "bbox": [ + 62, + 290, + 164, + 386 + ], + "lines": [ + { + "bbox": [ + 62, + 290, + 164, + 386 + ], + "spans": [ + { + "bbox": [ + 62, + 290, + 164, + 386 + ], + "type": "image", + "image_path": "957d84b5828bdd5a612e32c2651bbbda754f02c37807084e283aa9a19c30dc49.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 61, + 389, + 175, + 407 + ], + "lines": [ + { + "bbox": [ + 61, + 389, + 175, + 407 + ], + "spans": [ + { + "bbox": [ + 61, + 389, + 175, + 407 + ], + "type": "text", + "content": "(a) VDocRAG answers correctly, but Text-based RAG answers incorrectly" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 190, + 290, + 289, + 387 + ], + "blocks": [ + { + "bbox": [ + 190, + 290, + 289, + 387 + ], + "lines": [ + { + "bbox": [ + 190, + 290, + 289, + 387 + ], + "spans": [ + { + "bbox": [ + 190, + 290, + 289, + 387 + ], + "type": "image", + "image_path": "150a4df0e8043a1b2230cf04914527d44abdde76e7a1e1f42228cdd4b0087919.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 181, + 389, + 292, + 407 + ], + "lines": [ + { + "bbox": [ + 181, + 389, + 292, + 407 + ], + "spans": [ + { + "bbox": [ + 181, + 389, + 292, + 407 + ], + "type": "text", + "content": "(b) VDocRAG answers incorrectly, but Text-based RAG answers correctly" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 69, + 419, + 281, + 430 + ], + "lines": [ + { + "bbox": [ + 69, + 419, + 281, + 430 + ], + "spans": [ + { + "bbox": [ + 69, + 419, + 281, + 430 + ], + "type": "text", + "content": "Figure 7. Root causes of correct and incorrect predictions." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 453, + 295, + 536 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 453, + 295, + 536 + ], + "spans": [ + { + "bbox": [ + 55, + 453, + 295, + 536 + ], + "type": "text", + "content": "Can our method apply different LVLMs? To investigate the impact of different LVLMs on VDocRAG, we replaced Phi3V with Idefics3 [29], a state-of-the-art LVLM that uses Llama3-8B [16] as its backbone LLM. As observed in Table 8, the performance trend was consistent with that of Phi3V, highlighting the versatility and broad applicability of our method." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 553, + 296, + 673 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 553, + 296, + 673 + ], + "spans": [ + { + "bbox": [ + 55, + 553, + 296, + 673 + ], + "type": "text", + "content": "Qualitative results. Figure 6 illustrates the performance of our model through qualitative examples. In the top example, VDocRAG demonstrates strong performance on a question requiring multi-hop reasoning and graph understanding across multi-page slides. In the bottom example, VDocRAG also performs better on a question that requires parsing on the table with cells spanning multiple rows and columns. In contrast, text-based RAG depends solely on OCR text information, leading to a superficial understanding of the text and incorrect predictions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 296, + 713 + ], + "type": "text", + "content": "Human evaluation. To better understand the prediction differences between VDocRAG and text-based RAG, we" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 313, + 293, + 555, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 293, + 555, + 424 + ], + "spans": [ + { + "bbox": [ + 313, + 293, + 555, + 424 + ], + "type": "text", + "content": "manually analyzed the generated outputs by identifying the root causes of 50 correct and 50 incorrect predictions, randomly sampled from test samples. Figure 7a shows that VDocRAG significantly enhances the understanding of visual data (e.g., charts). Conversely, Figure 7b reveals that VDocRAG encounters challenges with text-heavy documents (e.g., books), primarily due to the OCR capabilities. We observed that text-based RAG correctly answers questions when visual data includes long titles or subtitles, which have a high textual overlap with the question. These observations are in line with the results shown in Figure 5." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 313, + 436, + 388, + 449 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 436, + 388, + 449 + ], + "spans": [ + { + "bbox": [ + 313, + 436, + 388, + 449 + ], + "type": "text", + "content": "6. Conclusion" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 456, + 555, + 589 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 456, + 555, + 589 + ], + "spans": [ + { + "bbox": [ + 313, + 456, + 555, + 589 + ], + "type": "text", + "content": "We introduced a new RAG framework, VDocRAG, which can directly understand various real-world documents. We enhanced VDocRAG with two key contributions: (1) pretraining tasks capable of learning image representation efficiently by leveraging the powerful capabilities of LVLMs, and (2) OpenDocVQA, the first unified open-domain DocumentVQA dataset that encompasses a wide range of visually-rich documents. Our holistic evaluations on four datasets show that VDocRAG significantly outperformed conventional text-based RAG, shedding light on the development of an effective RAG over real-world documents." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 605, + 556, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 605, + 556, + 715 + ], + "spans": [ + { + "bbox": [ + 313, + 605, + 556, + 715 + ], + "type": "text", + "content": "Limitations. 
While we focused on pre-training to align images and OCR data for document retrieval, leveraging caption data instead of OCR data offers the potential for retrieving images that do not contain text. Moreover, this study did not address reducing the computational cost of creating search indexes for extensive image collections. We plan to reduce the cost of VDocRAG using more efficient techniques. Lastly, joint training of QA and retrieval components simultaneously further optimizes their interactions." + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "spans": [ + { + "bbox": [ + 56, + 71, + 115, + 83 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 57, + 91, + 294, + 714 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 61, + 91, + 294, + 145 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 91, + 294, + 145 + ], + "spans": [ + { + "bbox": [ + 61, + 91, + 294, + 145 + ], + "type": "text", + "content": "[1] Marah Abdin, Sam Ade Jacobs, Ammar Ahmad Awan, Jyoti Aneja, Ahmed Awadallah, Hany Awadalla, Nguyen Bach, Amit Bahree, Arash Bakhtiari, Harkirat Behl, et al. Phi-3 technical report: A highly capable language model locally on your phone. arXiv:2404.14219, 2024. 2, 5, 6, 3" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 61, + 146, + 294, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 146, + 294, + 190 + ], + "spans": [ + { + "bbox": [ + 61, + 146, + 294, + 190 + ], + "type": "text", + "content": "[2] Josh Achiam, Steven Adler, Sandhini Agarwal, Lama Ahmad, Ilge Akkaya, Florencia Leoni Aleman, Diogo Almeida, Janko Altenschmidt, Sam Altman, Shyamal Anadkat, et al. GPT-4 technical report. arXiv:2303.08774, 2023. 1, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 62, + 191, + 294, + 224 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 191, + 294, + 224 + ], + "spans": [ + { + "bbox": [ + 62, + 191, + 294, + 224 + ], + "type": "text", + "content": "[3] Akari Asai, Sewon Min, Zexuan Zhong, and Danqi Chen. Retrieval-based language models and applications. In ACL, pages 41-46, 2023. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 62, + 225, + 294, + 267 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 225, + 294, + 267 + ], + "spans": [ + { + "bbox": [ + 62, + 225, + 294, + 267 + ], + "type": "text", + "content": "[4] Ali Furkan Biten, Rubén Tito, Andrés Mafla, Lluis Gómez i Bigorda, Marçal Rusinol, C. V. Jawahar, Ernest Valveny, and Dimosthenis Karatzas. Scene text visual question answering. In ICCV, pages 4290-4300, 2019. 6" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 62, + 270, + 294, + 334 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 270, + 294, + 334 + ], + "spans": [ + { + "bbox": [ + 62, + 270, + 294, + 334 + ], + "type": "text", + "content": "[5] Sebastian Borgeaud, Arthur Mensch, Jordan Hoffmann, Trevor Cai, Eliza Rutherford, Katie Millican, George Bm Van Den Driessche, Jean-Baptiste Lespiau, Bogdan Damoc, Aidan Clark, et al. Improving language models by retrieving from trillions of tokens. In ICML, pages 2206-2240, 2022. 
2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 62, + 335, + 294, + 378 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 335, + 294, + 378 + ], + "spans": [ + { + "bbox": [ + 62, + 335, + 294, + 378 + ], + "type": "text", + "content": "[6] Minwoo Byeon, Beomhee Park, Haecheon Kim, Sungjun Lee, Woonhyuk Baek, and Saehoon Kim. Coyo-700m: Image-text pair dataset. https://github.com/kakaobrain/coyo-dataset, 2022.3" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 62, + 380, + 294, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 380, + 294, + 423 + ], + "spans": [ + { + "bbox": [ + 62, + 380, + 294, + 423 + ], + "type": "text", + "content": "[7] Jingwen Chen, Yingwei Pan, Yehao Li, Ting Yao, Hongyang Chao, and Tao Mei. Retrieval augmented convolutional encoder-decoder networks for video captioning. TOMCCAP, pages 1-24, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 62, + 425, + 294, + 456 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 425, + 294, + 456 + ], + "spans": [ + { + "bbox": [ + 62, + 425, + 294, + 456 + ], + "type": "text", + "content": "[8] Wenhu Chen, Hexiang Hu, Chitwan Sahara, and William W Cohen. Re-imagen: Retrieval-augmented text-to-image generator. arXiv:2209.14491, 2022. 2" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 62, + 458, + 294, + 501 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 62, + 458, + 294, + 501 + ], + "spans": [ + { + "bbox": [ + 62, + 458, + 294, + 501 + ], + "type": "text", + "content": "[9] Jaemin Cho, Debanjan Mahata, Ozan Irsoy, Yujie He, and Mohit Bansal. M3DocRAG: Multi-modal retrieval is what you need for multi-page multi-document understanding. arXiv:2411.04952, 2024. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 57, + 502, + 294, + 556 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 502, + 294, + 556 + ], + "spans": [ + { + "bbox": [ + 57, + 502, + 294, + 556 + ], + "type": "text", + "content": "[10] Wenliang Dai, Junnan Li, Dongxu Li, Anthony Meng Huat Tiong, Junqi Zhao, Weisheng Wang, Boyang Li, Pascale Fung, and Steven Hoi. InstructBLIP: Towards general-purpose vision-language models with instruction tuning. arXiv:2305.06500, 2023. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 57, + 558, + 294, + 600 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 558, + 294, + 600 + ], + "spans": [ + { + "bbox": [ + 57, + 558, + 294, + 600 + ], + "type": "text", + "content": "[11] Tri Dao, Dan Fu, Stefano Ermon, Atri Rudra, and Christopher Ré. FlashAttention: Fast and memory-efficient exact attention with io-awareness. In NeurIPS, pages 16344-16359, 2022. 5" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 57, + 602, + 294, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 602, + 294, + 645 + ], + "spans": [ + { + "bbox": [ + 57, + 602, + 294, + 645 + ], + "type": "text", + "content": "[12] Jacob Devlin, Ming-Wei Chang, Kenton Lee, and Kristina Toutanova. BERT: pre-training of deep bidirectional transformers for language understanding. In *NAACL-HLT*, pages 4171–4186, 2019. 
5, 6" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 57, + 647, + 294, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 647, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 57, + 647, + 294, + 689 + ], + "type": "text", + "content": "[13] Kuicai Dong, Yujing Chang, Xin Deik Goh, Dexun Li, Ruiming Tang, and Yong Liu. MMDocIR: Benchmarking multi-modal retrieval for long documents. arXiv:2501.08828, 2025. 2" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 57, + 691, + 294, + 714 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 57, + 691, + 294, + 714 + ], + "spans": [ + { + "bbox": [ + 57, + 691, + 294, + 714 + ], + "type": "text", + "content": "[14] Xiaoyi Dong, Pan Zhang, Yuhang Zang, Yuhang Cao, Bin Wang, Linke Ouyang, Songyang Zhang, Haodong Duan," + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 30, + "blocks": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "spans": [ + { + "bbox": [ + 333, + 73, + 553, + 106 + ], + "type": "text", + "content": "Wenwei Zhang, Yining Li, et al. Internlm-xcomposer2-4khd: A pioneering large vision-language model handling resolutions from 336 pixels to 4k hd. arXiv:2404.06512, 2024. 4" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 107, + 553, + 150 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 107, + 553, + 150 + ], + "spans": [ + { + "bbox": [ + 316, + 107, + 553, + 150 + ], + "type": "text", + "content": "[15] Matthijs Douze, Alexandr Guzhva, Chengqi Deng, Jeff Johnson, Gergely Szilvasy, Pierre-Emmanuel Mazaré, Maria Lomeli, Lucas Hosseini, and Hervé Jégou. The faiss library. arXiv:2401.08281, 2024. 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 152, + 553, + 195 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 152, + 553, + 195 + ], + "spans": [ + { + "bbox": [ + 316, + 152, + 553, + 195 + ], + "type": "text", + "content": "[16] Abhimanyu Dubey, Abhinav Jauhri, Abhinav Pandey, Abhishek Kadian, Ahmad Al-Dahle, Aiesha Letman, Akhil Mathur, Alan Schelten, Amy Yang, Angela Fan, et al. The llama 3 herd of models. arXiv:2407.21783, 2024. 1, 8" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 198, + 553, + 240 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 198, + 553, + 240 + ], + "spans": [ + { + "bbox": [ + 316, + 198, + 553, + 240 + ], + "type": "text", + "content": "[17] Manuel Faysse, Hugues Sibille, Tony Wu, Gautier Vi-aud, Céline Hudelot, and Pierre Colombo. ColPali: Efficient document retrieval with vision language models. arXiv:2407.01449, 2024. 1, 2, 3, 6" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 243, + 553, + 274 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 243, + 553, + 274 + ], + "spans": [ + { + "bbox": [ + 316, + 243, + 553, + 274 + ], + "type": "text", + "content": "[18] Kelvin Guu, Kenton Lee, Zora Tung, Panupong Pasupat, and Mingwei Chang. Retrieval augmented language model pretraining. In ICML, pages 3929-3938, 2020. 
1" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 277, + 553, + 309 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 277, + 553, + 309 + ], + "spans": [ + { + "bbox": [ + 316, + 277, + 553, + 309 + ], + "type": "text", + "content": "[19] Matthew Honnibal and Ines Montani. spaCy 2: Natural language understanding with Bloom embeddings, convolutional neural networks and incremental parsing. To appear, 2017. 3" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 312, + 553, + 354 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 312, + 553, + 354 + ], + "spans": [ + { + "bbox": [ + 316, + 312, + 553, + 354 + ], + "type": "text", + "content": "[20] Anwen Hu, Haiyang Xu, Jiabo Ye, Ming Yan, Liang Zhang, Bo Zhang, Chen Li, Ji Zhang, Qin Jin, Fei Huang, et al. nplug-docowl 1.5: Unified structure learning forOCR-free document understanding. arXiv:2403.12895, 2024. 5" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 356, + 553, + 399 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 356, + 553, + 399 + ], + "spans": [ + { + "bbox": [ + 316, + 356, + 553, + 399 + ], + "type": "text", + "content": "[21] Edward J Hu, Yelong Shen, Phillip Wallis, Zeyuan Allen-Zhu, Yuanzhi Li, Shean Wang, Lu Wang, and Weizhu Chen. LoRA: Low-rank adaptation of large language models. arXiv:2106.09685, 2021. 5" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 401, + 553, + 444 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 401, + 553, + 444 + ], + "spans": [ + { + "bbox": [ + 316, + 401, + 553, + 444 + ], + "type": "text", + "content": "[22] Gautier Izacard, Mathilde Caron, Lucas Hosseini, Sebastian Riedel, Piotr Bojanowski, Armand Joulin, and Edouard Grave. Unsupervised dense information retrieval with contrastive learning. arXiv:2112.09118, 2021. 5, 6, 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 317, + 446, + 553, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 446, + 553, + 521 + ], + "spans": [ + { + "bbox": [ + 317, + 446, + 553, + 521 + ], + "type": "text", + "content": "[23] Albert Q. Jiang, Alexandre Sablayrolles, Arthur Mensch, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Florian Bressand, Gianna Lengyel, Guillaume Lample, Lucile Saulnier, Lélio Renard Lavaud, Marie-Anne Lachaux, Pierre Stock, Teven Le Scao, Thibaut Lavril, Thomas Wang, Timothée Lacroix, and William El Sayed. Mistral 7b. arXiv:2310.06825, 2023. 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 317, + 524, + 553, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 317, + 524, + 553, + 577 + ], + "spans": [ + { + "bbox": [ + 317, + 524, + 553, + 577 + ], + "type": "text", + "content": "[24] Albert Q Jiang, Alexandre Sablayrolles, Antoine Roux, Arthur Mensch, Blanche Savary, Chris Bamford, Devendra Singh Chaplot, Diego de las Casas, Emma Bou Hanna, Florian Bressand, et al. Mixtral of experts. arXiv:2401.04088, 2024. 1, 3" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 580, + 553, + 622 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 580, + 553, + 622 + ], + "spans": [ + { + "bbox": [ + 316, + 580, + 553, + 622 + ], + "type": "text", + "content": "[25] Ehsan Kamalloo, Nandan Thakur, Carlos Lassance, Xueguang Ma, Jheng-Hong Yang, and Jimmy Lin. Resources for brewing heir: Reproducible reference models and an official leaderboard, 2023. 
6" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 624, + 553, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 624, + 553, + 668 + ], + "spans": [ + { + "bbox": [ + 316, + 624, + 553, + 668 + ], + "type": "text", + "content": "[26] Yuma Koizumi, Yasunori Ohishi, Daisuke Niizumi, Daiki Takeuchi, and Masahiro Yasuda. Audio captioning using pre-trained large-scale language model guided by audiobased similar caption retrieval. arXiv:2012.07331, 2020. 2" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 670, + 553, + 713 + ], + "type": "text", + "content": "[27] Sunjun Kweon, Yeonsu Kwon, Seonhee Cho, Yohan Jo, and Edward Choi. Open-WikiTable: Dataset for open domain question answering with complex reasoning over table. In Findings of ACL, pages 8285-8297, 2023. 3, 5, 1" + } + ] + } + ], + "index": 29 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 15, + "blocks": [ + { + "bbox": [ + 56, + 73, + 294, + 127 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 73, + 294, + 127 + ], + "spans": [ + { + "bbox": [ + 56, + 73, + 294, + 127 + ], + "type": "text", + "content": "[28] Jordy Landeghem, Rubén Tito, Łukasz Borchmann, Michal Pietruszka, Paweł Józiak, Rafał Powalski, Dawid Jurkiewicz, Mickaël Coustaty, Bertrand Ackaert, Ernest Valveny, et al. Document understanding dataset and evaluation (dude). In ICCV, pages 19528-19540, 2023. 2, 3, 5, 1" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 128, + 294, + 172 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 128, + 294, + 172 + ], + "spans": [ + { + "bbox": [ + 56, + 128, + 294, + 172 + ], + "type": "text", + "content": "[29] Hugo Laurençon, Andrés Marafioti, Victor Sanh, and Léo Tronchon. Building and better understanding vision-language models: insights and future directions. arXiv:2408.12637, 2024. 2, 8" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 172, + 294, + 216 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 172, + 294, + 216 + ], + "spans": [ + { + "bbox": [ + 56, + 172, + 294, + 216 + ], + "type": "text", + "content": "[30] Chankyu Lee, Rajarshi Roy, Mengyao Xu, Jonathan Raiman, Mohammad Shoeybi, Bryan Catanzaro, and Wei Ping. NvEmbed: Improved techniques for training llms as generalist embedding models. arXiv:2405.17428, 2024. 5, 6, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 217, + 294, + 271 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 217, + 294, + 271 + ], + "spans": [ + { + "bbox": [ + 56, + 217, + 294, + 271 + ], + "type": "text", + "content": "[31] Patrick Lewis, Ethan Perez, Aleksandra Piktus, Fabio Petroni, Vladimir Karpukhin, Naman Goyal, Heinrich Kuttler, Mike Lewis, Wen-tau Yih, Tim Rocktäschel, et al. Retrieval-augmented generation for knowledge-intensive nlp tasks. In NIPS, pages 9459-9474, 2020. 
1" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 271, + 294, + 316 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 271, + 294, + 316 + ], + "spans": [ + { + "bbox": [ + 56, + 271, + 294, + 316 + ], + "type": "text", + "content": "[32] Junnan Li, Dongxu Li, Caiming Xiong, and Steven Hoi. Blip: Bootstrapping language-image pre-training for unified vision-language understanding and generation. In ICML, pages 12888-12900, 2022. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 316, + 294, + 360 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 316, + 294, + 360 + ], + "spans": [ + { + "bbox": [ + 56, + 316, + 294, + 360 + ], + "type": "text", + "content": "[33] Junnan Li, Dongxu Li, Silvio Savarese, and Steven Hoi. BLIP-2: bootstrapping language-image pre-training with frozen image encoders and large language models. In ICML, pages 19730–19742, 2023. 2" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 360, + 294, + 403 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 360, + 294, + 403 + ], + "spans": [ + { + "bbox": [ + 56, + 360, + 294, + 403 + ], + "type": "text", + "content": "[34] Zehan Li, Xin Zhang, Yanzhao Zhang, Dingkun Long, Pengjun Xie, and Meishan Zhang. Towards general text embeddings with multi-stage contrastive learning. arXiv:2308.03281, 2023. 5, 6" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 404, + 294, + 426 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 404, + 294, + 426 + ], + "spans": [ + { + "bbox": [ + 56, + 404, + 294, + 426 + ], + "type": "text", + "content": "[35] Haotian Liu, Chunyuan Li, Qingyang Wu, and Yong Jae Lee. Visual instruction tuning. arXiv:2304.08485, 2023. 2" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 426, + 294, + 448 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 426, + 294, + 448 + ], + "spans": [ + { + "bbox": [ + 56, + 426, + 294, + 448 + ], + "type": "text", + "content": "[36] Ilya Loshchilov and Frank Hutter. Decoupled weight decay regularization. arXiv:1711.05101, 2017. 5" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 449, + 294, + 491 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 449, + 294, + 491 + ], + "spans": [ + { + "bbox": [ + 56, + 449, + 294, + 491 + ], + "type": "text", + "content": "[37] Xueguang Ma, Sheng-Chieh Lin, Minghan Li, Wenhu Chen, and Jimmy Lin. Unifying multimodal retrieval via document screenshot embedding. arXiv:2406.11251, 2024. 1, 2, 5, 6, 3" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 492, + 294, + 536 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 492, + 294, + 536 + ], + "spans": [ + { + "bbox": [ + 56, + 492, + 294, + 536 + ], + "type": "text", + "content": "[38] Xueguang Ma, Shengyao Zhuang, Bevan Koopman, Guido Zuccon, Wenhu Chen, and Jimmy Lin. VISA: Retrieval augmented generation with visual source attribution. arXiv:2412.14457, 2024. 2" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 537, + 294, + 581 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 537, + 294, + 581 + ], + "spans": [ + { + "bbox": [ + 56, + 537, + 294, + 581 + ], + "type": "text", + "content": "[39] Seiji Maekawa, Hayate Iso, Sairam Gurajada, and Nikita Bhutani. Retrieval helps or hurts? a deeper dive into the efficacy of retrieval augmentation to language models. In NAACL, pages 5506-5521, 2024. 
1, 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 582, + 294, + 635 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 582, + 294, + 635 + ], + "spans": [ + { + "bbox": [ + 56, + 582, + 294, + 635 + ], + "type": "text", + "content": "[40] Alex Mallen, Akari Asai, Victor Zhong, Rajarshi Das, Daniel Khashabi, and Hannaneh Hajishirzi. When not to trust language models: Investigating effectiveness of parametric and non-parametric memories. In ACL, pages 9802-9822, 2023. 1, 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 635, + 294, + 680 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 635, + 294, + 680 + ], + "spans": [ + { + "bbox": [ + 56, + 635, + 294, + 680 + ], + "type": "text", + "content": "[41] Ahmed Masry, Xuan Long Do, Jia Qing Tan, Shafiq Joty, and Enamul Hoque. ChartQA: A benchmark for question answering about charts with visual and logical reasoning. In Findings of ACL, pages 2263-2279, 2022. 2, 3, 5, 6, 1" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 56, + 681, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 681, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 681, + 294, + 713 + ], + "type": "text", + "content": "[42] Minesh Mathew, Dimosthenis Karatzas, and C. V. Jawahar. DocVQA: A dataset for vqa on document images. In WACV, pages 2200-2209, 2021. 1, 2, 5" + } + ] + } + ], + "index": 14 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 316, + 73, + 553, + 713 + ], + "type": "list", + "angle": 0, + "index": 31, + "blocks": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "spans": [ + { + "bbox": [ + 316, + 73, + 553, + 106 + ], + "type": "text", + "content": "[43] Minesh Mathew, Viraj Bagal, Ruben Tito, Dimosthenis Karatzas, Ernest Valveny, and C.V. Jawahar. InfographicVQA. In WACV, pages 1697-1706, 2022. 1, 2, 3, 5" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 107, + 553, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 107, + 553, + 139 + ], + "spans": [ + { + "bbox": [ + 316, + 107, + 553, + 139 + ], + "type": "text", + "content": "[44] Aaron van den Oord, Yazhe Li, and Oriol Vinyals. Representation learning with contrastive predictive coding. arXiv:1807.03748, 2018. 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 140, + 553, + 184 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 140, + 553, + 184 + ], + "spans": [ + { + "bbox": [ + 316, + 140, + 553, + 184 + ], + "type": "text", + "content": "[45] Md Rizwan Parvez, Wasi Uddin Ahmad, Saikat Chakraborty, Baishakhi Ray, and Kai-Wei Chang. Retrieval augmented code generation and summarization. arXiv:2108.11601, 2021. 2" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 316, + 185, + 553, + 239 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 185, + 553, + 239 + ], + "spans": [ + { + "bbox": [ + 316, + 185, + 553, + 239 + ], + "type": "text", + "content": "[46] Le Qi, Shangwen Lv, Hongyu Li, Jing Liu, Yu Zhang, Qiaoqiao She, Hua Wu, Haifeng Wang, and Ting Liu. DuReadervis: A Chinese dataset for open-domain document visual question answering. In Findings of ACL, pages 1338-1351, 2022. 
2, 3" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 316, + 241, + 553, + 297 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 241, + 553, + 297 + ], + "spans": [ + { + "bbox": [ + 316, + 241, + 553, + 297 + ], + "type": "text", + "content": "[47] Alec Radford, Jong Wook Kim, Chris Hallacy, Aditya Ramesh, Gabriel Goh, Sandhini Agarwal, Girish Sastry, Amanda Askell, Pamela Mishkin, Jack Clark, et al. Learning transferable visual models from natural language supervision. In ICML, pages 8748-8763, 2021. 5, 6" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 316, + 297, + 553, + 351 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 297, + 553, + 351 + ], + "spans": [ + { + "bbox": [ + 316, + 297, + 553, + 351 + ], + "type": "text", + "content": "[48] Colin Raffel, Noam Shazeer, Adam Roberts, Katherine Lee, Sharan Narang, Michael Matena, Yanqi Zhou, Wei Li, and Peter J. Liu. Exploring the limits of transfer learning with a unified text-to-text transformer. JMLR, 21(140):1-67, 2020. 2" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 316, + 353, + 553, + 397 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 353, + 553, + 397 + ], + "spans": [ + { + "bbox": [ + 316, + 353, + 553, + 397 + ], + "type": "text", + "content": "[49] Ori Ram, Yoav Levine, Itay Dalmedigos, Dor Muhlgay, Amnon Shashua, Kevin Leyton-Brown, and Yoav Shoham. Incontext retrieval-augmented language models. TACL, pages 1316-1331, 2023. 2" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 316, + 398, + 553, + 430 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 398, + 553, + 430 + ], + "spans": [ + { + "bbox": [ + 316, + 398, + 553, + 430 + ], + "type": "text", + "content": "[50] Rita Ramos, Desmond Elliott, and Bruno Martins. Retrievalaugmented image captioning. In EACL, pages 3666-3681, 2023. 2" + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 316, + 432, + 553, + 475 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 432, + 553, + 475 + ], + "spans": [ + { + "bbox": [ + 316, + 432, + 553, + 475 + ], + "type": "text", + "content": "[51] Rita Ramos, Bruno Martins, Desmond Elliott, and Yova Kementchedjhieva. Smallcap: lightweight image captioning prompted with retrieval augmentation. In CVPR, pages 2840-2849, 2023. 2" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 316, + 476, + 553, + 510 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 476, + 553, + 510 + ], + "spans": [ + { + "bbox": [ + 316, + 476, + 553, + 510 + ], + "type": "text", + "content": "[52] Stephen Robertson, Hugo Zaragoza, et al. The probabilistic relevance framework: Bm25 and beyond. Foundations and Trends® in Information Retrieval, 3(4):333-389, 2009. 5, 6" + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 316, + 511, + 553, + 555 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 511, + 553, + 555 + ], + "spans": [ + { + "bbox": [ + 316, + 511, + 553, + 555 + ], + "type": "text", + "content": "[53] Junyoung Seo, Susung Hong, Wooseok Jang, Ines Hyeonsu Kim, Minseop Kwak, Doyup Lee, and Seungryong Kim. Retrieval-augmented score distillation for text-to-3d generation. arXiv:2402.02972, 2024. 
2" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 316, + 555, + 553, + 578 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 555, + 553, + 578 + ], + "spans": [ + { + "bbox": [ + 316, + 555, + 553, + 578 + ], + "type": "text", + "content": "[54] Ray Smith. An overview of the tesseractOCR engine. In ICDAR, pages 629-633, 2007. 5" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 316, + 579, + 553, + 633 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 579, + 553, + 633 + ], + "spans": [ + { + "bbox": [ + 316, + 579, + 553, + 633 + ], + "type": "text", + "content": "[55] Mirac Suzgun, Nathan Scales, Nathanael Scharli, Sebastian Gehrmann, Yi Tay, Hyung Won Chung, Aakanksha Chowdhery, Quoc V Le, Ed H Chi, Denny Zhou, et al. Challenging big-bench tasks and whether chain-of-thought can solve them. arXiv:2210.09261, 2022. 1" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 316, + 635, + 553, + 668 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 635, + 553, + 668 + ], + "spans": [ + { + "bbox": [ + 316, + 635, + 553, + 668 + ], + "type": "text", + "content": "[56] Ryota Tanaka, Kyosuke Nishida, and Sen Yoshida. VisualMRC: Machine reading comprehension on document images. In AAAI, pages 13878-13888, 2021. 1, 2, 3, 5" + } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 316, + 669, + 553, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 669, + 553, + 713 + ], + "spans": [ + { + "bbox": [ + 316, + 669, + 553, + 713 + ], + "type": "text", + "content": "[57] Ryota Tanaka, Kyosuke Nishida, Kosuke Nishida, Taku Hasegawa, Itsumi Saito, and Kuniko Saito. SlideVQA: A dataset for document visual question answering on multiple images. In AAAI, pages 13636-13645, 2023. 1, 2, 3, 5" + } + ] + } + ], + "index": 30 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 713 + ], + "type": "list", + "angle": 0, + "index": 14, + "blocks": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "spans": [ + { + "bbox": [ + 56, + 72, + 294, + 116 + ], + "type": "text", + "content": "[58] Ryota Tanaka, Taichi Iki, Kyosuke Nishida, Kuniko Saito, and Jun Suzuki. Instructdoc: A dataset for zero-shot generalization of visual document understanding with instructions. In AAAI, pages 19071-19079, 2024. 2" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 56, + 118, + 294, + 162 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 118, + 294, + 162 + ], + "spans": [ + { + "bbox": [ + 56, + 118, + 294, + 162 + ], + "type": "text", + "content": "[59] Liang Wang, Nan Yang, Xiaolong Huang, Binxing Jiao, Linjun Yang, Daxin Jiang, Rangan Majumder, and Furu Wei. Text embeddings by weakly-supervised contrastive pretraining. arXiv:2212.03533, 2022. 5, 6" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 56, + 163, + 294, + 206 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 163, + 294, + 206 + ], + "spans": [ + { + "bbox": [ + 56, + 163, + 294, + 206 + ], + "type": "text", + "content": "[60] Liang Wang, Nan Yang, Xiaolong Huang, Linjun Yang, Rangan Majumder, and Furu Wei. Improving text embeddings with large language models. In ACL, pages 11897-11916, 2024. 
5, 6, 3" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 56, + 209, + 294, + 241 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 209, + 294, + 241 + ], + "spans": [ + { + "bbox": [ + 56, + 209, + 294, + 241 + ], + "type": "text", + "content": "[61] Jilan Xu, Yifei Huang, Junlin Hou, Guo Chen, Yuejie Zhang, Rui Feng, and Weidi Xie. Retrieval-augmented egocentric video captioning. In CVPR, pages 13525-13536, 2024. 2" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 56, + 243, + 294, + 285 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 243, + 294, + 285 + ], + "spans": [ + { + "bbox": [ + 56, + 243, + 294, + 285 + ], + "type": "text", + "content": "[62] Dongchao Yang, Songxiang Liu, Rongjie Huang, Chao Weng, and Helen Meng. Instructtts: Modelling expressive tts in discrete latent space with natural language style prompt. TASLP, pages 2913-2925, 2024. 2" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 56, + 287, + 294, + 363 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 287, + 294, + 363 + ], + "spans": [ + { + "bbox": [ + 56, + 287, + 294, + 363 + ], + "type": "text", + "content": "[63] Yuan Yao, Tianyu Yu, Ao Zhang, Chongyi Wang, Junbo Cui, Hongji Zhu, Tianchi Cai, Haoyu Li, Weilin Zhao, Zhihui He, Qianyu Chen, Huarong Zhou, Zhensheng Zou, Haoye Zhang, Shengding Hu, Zhi Zheng, Jie Zhou, Jie Cai, Xu Han, Guoyang Zeng, Dahai Li, Zhiyuan Liu, and Maosong Sun. Minicpm-v: A gpt-4v level mllm on your phone. arXiv:2408.01800, 2024. 6" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 56, + 365, + 294, + 419 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 365, + 294, + 419 + ], + "spans": [ + { + "bbox": [ + 56, + 365, + 294, + 419 + ], + "type": "text", + "content": "[64] Michihiro Yasunaga, Armen Aghajanyan, Weijia Shi, Rich James, Jure Leskovec, Percy Liang, Mike Lewis, Luke Zettlemoyer, and Wen-tau Yih. Retrieval-augmented multimodal language modeling. In ICML, pages 39755-39769, 2023. 2" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 56, + 421, + 294, + 487 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 421, + 294, + 487 + ], + "spans": [ + { + "bbox": [ + 56, + 421, + 294, + 487 + ], + "type": "text", + "content": "[65] Jiabo Ye, Anwen Hu, Haiyang Xu, Qinghao Ye, Ming Yan, Guohai Xu, Chenliang Li, Junfeng Tian, Qi Qian, Ji Zhang, Qin Jin, Liang He, Xin Lin, and Fei Huang. UReader: Universal OCR-free visually-situated language understanding with multimodal large language model. In EMNLP Findings, pages 2841-2858, 2023. 4" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 56, + 488, + 294, + 542 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 488, + 294, + 542 + ], + "spans": [ + { + "bbox": [ + 56, + 488, + 294, + 542 + ], + "type": "text", + "content": "[66] Shi Yu, Chaoyue Tang, Bokai Xu, Junbo Cui, Junhao Ran, Yukun Yan, Zhenghao Liu, Shuo Wang, Xu Han, Zhiyuan Liu, et al. VisRAG: Vision-based retrieval-augmented generation on multi-modality documents. arXiv:2410.10594, 2024. 2, 5, 6" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 56, + 544, + 294, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 544, + 294, + 577 + ], + "spans": [ + { + "bbox": [ + 56, + 544, + 294, + 577 + ], + "type": "text", + "content": "[67] Xiaohua Zhai, Basil Mustafa, Alexander Kolesnikov, and Lucas Beyer. Sigmoid loss for language image pre-training. 
In ICCV, pages 11975-11986, 2023. 2" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 56, + 578, + 294, + 611 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 578, + 294, + 611 + ], + "spans": [ + { + "bbox": [ + 56, + 578, + 294, + 611 + ], + "type": "text", + "content": "[68] Liang Zhang, Anwen Hu, Jing Zhang, Shuo Hu, and Qin Jin. MPMQA: multimodal question answering on product manuals. In AAAI, pages 13958-13966, 2023. 2, 3, 5, 1" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 56, + 613, + 294, + 655 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 613, + 294, + 655 + ], + "spans": [ + { + "bbox": [ + 56, + 613, + 294, + 655 + ], + "type": "text", + "content": "[69] Mingyuan Zhang, Xinying Guo, Liang Pan, Zhongang Cai, Fangzhou Hong, Huirong Li, Lei Yang, and Ziwei Liu. Remodiffuse: Retrieval-augmented motion diffusion model. In ICCV, pages 364-373, 2023. 2" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 56, + 658, + 294, + 689 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 658, + 294, + 689 + ], + "spans": [ + { + "bbox": [ + 56, + 658, + 294, + 689 + ], + "type": "text", + "content": "[70] Shuyan Zhou, Uri Alon, Frank F Xu, Zhiruo Wang, Zhengbao Jiang, and Graham Neubig. Docprompting: Generating code by retrieving the docs. arXiv:2207.05987, 2022. 2" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "spans": [ + { + "bbox": [ + 56, + 691, + 294, + 713 + ], + "type": "text", + "content": "[71] Fengbin Zhu, Wenqiang Lei, Fuli Feng, Chao Wang, Haozhou Zhang, and Tat-Seng Chua. Towards complex doc" + } + ] + } + ], + "index": 13 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 334, + 72, + 553, + 95 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 334, + 72, + 553, + 95 + ], + "spans": [ + { + "bbox": [ + 334, + 72, + 553, + 95 + ], + "type": "text", + "content": "ument understanding by discrete reasoning. In ACMM, pages 4857-4866, 2022. 2" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 233, + 104, + 376, + 121 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 233, + 104, + 376, + 121 + ], + "spans": [ + { + "bbox": [ + 233, + 104, + 376, + 121 + ], + "type": "text", + "content": "Supplementary Material" + } + ] + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 77, + 132, + 274, + 324 + ], + "blocks": [ + { + "bbox": [ + 77, + 132, + 274, + 324 + ], + "lines": [ + { + "bbox": [ + 77, + 132, + 274, + 324 + ], + "spans": [ + { + "bbox": [ + 77, + 132, + 274, + 324 + ], + "type": "table", + "html": "
StatisticsNumber
Total Images206,267
Total Questions43,474
- Single-Hop Questions33,244 (76.5%)
- Multi-Hop Questions10,230 (23.5%)
- Extractive Answer19,797 (45.5%)
- Abstractive Answer23,677 (54.5%)
QA Source Datasets9
- Existing DocumentVQA Datasets7
- Existing TableQA Datasets1
- Our Newly Created Datasets1
Maximum Question Length58
Maximum Answer Length130
Average Question Length13.7
Average Answer Length3.7
", + "image_path": "e9f6bab9c5e222d059bdff8614152323cb97a9ba24e7c9b7f1263981d7f9fd2b.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_body" + }, + { + "bbox": [ + 97, + 333, + 253, + 344 + ], + "lines": [ + { + "bbox": [ + 97, + 333, + 253, + 344 + ], + "spans": [ + { + "bbox": [ + 97, + 333, + 253, + 344 + ], + "type": "text", + "content": "Table A. Main statistics in OpenDocVQA." + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_footnote" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 351, + 132, + 518, + 217 + ], + "blocks": [ + { + "bbox": [ + 351, + 132, + 518, + 217 + ], + "lines": [ + { + "bbox": [ + 351, + 132, + 518, + 217 + ], + "spans": [ + { + "bbox": [ + 351, + 132, + 518, + 217 + ], + "type": "image", + "image_path": "7e49aaf1f1323c641e43f8db00064972159c2186eda489691bd009dc1116d050.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 351, + 228, + 517, + 314 + ], + "blocks": [ + { + "bbox": [ + 389, + 219, + 480, + 228 + ], + "lines": [ + { + "bbox": [ + 389, + 219, + 480, + 228 + ], + "spans": [ + { + "bbox": [ + 389, + 219, + 480, + 228 + ], + "type": "text", + "content": "(a) Word cloud of questions." + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 351, + 228, + 517, + 314 + ], + "lines": [ + { + "bbox": [ + 351, + 228, + 517, + 314 + ], + "spans": [ + { + "bbox": [ + 351, + 228, + 517, + 314 + ], + "type": "image", + "image_path": "f73482c9c890084652a720744d7d93804bedd1871b4d6c21d20525e9e5aba77b.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 390, + 316, + 478, + 324 + ], + "lines": [ + { + "bbox": [ + 390, + 316, + 478, + 324 + ], + "spans": [ + { + "bbox": [ + 390, + 316, + 478, + 324 + ], + "type": "text", + "content": "(b) Word cloud of answers." + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_caption" + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 367, + 186, + 380 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 367, + 186, + 380 + ], + "spans": [ + { + "bbox": [ + 55, + 367, + 186, + 380 + ], + "type": "text", + "content": "A. OpenDocVQA Details" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 389, + 296, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 389, + 296, + 495 + ], + "spans": [ + { + "bbox": [ + 55, + 389, + 296, + 495 + ], + "type": "text", + "content": "Dataset Statistics. The main statistics of OpenDocVQA are presented in Table A. There are two types of questions: single-hop (45.5%) and multi-hop (23.5%). Answers to questions are categorized as extractive (45.5%) and abstractive (54.5%) types. OpenDocVQA consists of nine open-domain DocumentVQA datasets, including a newly created MHDocVQA dataset to address multi-hop questions over multiple documents, and collected and filtered QA datasets as follows." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 55, + 498, + 295, + 714 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 55, + 498, + 295, + 521 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 498, + 295, + 521 + ], + "spans": [ + { + "bbox": [ + 55, + 498, + 295, + 521 + ], + "type": "text", + "content": "- DocVQA [42] includes industry document images collected from the UCSF Industry Document Library." 
+ } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 55, + 522, + 295, + 545 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 522, + 295, + 545 + ], + "spans": [ + { + "bbox": [ + 55, + 522, + 295, + 545 + ], + "type": "text", + "content": "- InfoVQA [43] includes infographics downloaded from the Internet for the search query \"infographics\"." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 55, + 546, + 295, + 569 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 546, + 295, + 569 + ], + "spans": [ + { + "bbox": [ + 55, + 546, + 295, + 569 + ], + "type": "text", + "content": "- VisualMRC [56] is a visual machine reading comprehension on webpage screenshot images." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 55, + 570, + 295, + 605 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 570, + 295, + 605 + ], + "spans": [ + { + "bbox": [ + 55, + 570, + 295, + 605 + ], + "type": "text", + "content": "ChartQA [41] is a chart understanding dataset with human-written and machine-generated questions focusing on visual and logical reasoning." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 55, + 606, + 295, + 653 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 606, + 295, + 653 + ], + "spans": [ + { + "bbox": [ + 55, + 606, + 295, + 653 + ], + "type": "text", + "content": "- OpenWikiTable [27] is an open-domain question answering over tables. We took screenshot images of the tables, converting them into images with complex text layouts to handle visually-rich table data." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 55, + 654, + 295, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 654, + 295, + 689 + ], + "spans": [ + { + "bbox": [ + 55, + 654, + 295, + 689 + ], + "type": "text", + "content": "- DUDE [28] is a multi-page, multi-domain, and multi-industry QA dataset that requires processing long documents and understanding different types of documents." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": "- MPMQA [68] requires comprehending multimodal content in an entire product manual and answering questions." + } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "type": "image", + "bbox": [ + 359, + 357, + 526, + 538 + ], + "blocks": [ + { + "bbox": [ + 316, + 335, + 550, + 346 + ], + "lines": [ + { + "bbox": [ + 316, + 335, + 550, + 346 + ], + "spans": [ + { + "bbox": [ + 316, + 335, + 550, + 346 + ], + "type": "text", + "content": "Figure A. Word cloud distributions of question and answer texts." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 359, + 357, + 526, + 538 + ], + "lines": [ + { + "bbox": [ + 359, + 357, + 526, + 538 + ], + "spans": [ + { + "bbox": [ + 359, + 357, + 526, + 538 + ], + "type": "image", + "image_path": "53209fd369b7adc5051e928ef74d2ca73a849e33e79fbfde065f67f13f855ff2.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 328, + 546, + 538, + 558 + ], + "lines": [ + { + "bbox": [ + 328, + 546, + 538, + 558 + ], + "spans": [ + { + "bbox": [ + 328, + 546, + 538, + 558 + ], + "type": "text", + "content": "Figure B. Distribution of first three words of the question." 
+ } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_caption" + } + ], + "index": 20 + }, + { + "bbox": [ + 313, + 578, + 553, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 578, + 553, + 614 + ], + "spans": [ + { + "bbox": [ + 313, + 578, + 553, + 614 + ], + "type": "text", + "content": "- SlideVQA [57] requires multi-hop reasoning over multiple slide images containing various text formats, layouts, and visual content such as plots and charts." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 313, + 615, + 553, + 675 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 615, + 553, + 675 + ], + "spans": [ + { + "bbox": [ + 313, + 615, + 553, + 675 + ], + "type": "text", + "content": "Figure A presents word clouds of the most frequently appeared words in the question and answer texts, illustrating that OpenDocVQA covers a wide range of topics and words. This observation is further supported by Figure B, which is a sunburst of the first three words of the questions." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 313, + 689, + 553, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 553, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 553, + 714 + ], + "type": "text", + "content": "Filtering DocumentVQA datasets. We applied the following five heuristic rules to automatically filter out likely" + } + ] + } + ], + "index": 24 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 61, + 70, + 81, + 92 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 61, + 70, + 81, + 92 + ], + "spans": [ + { + "bbox": [ + 61, + 70, + 81, + 92 + ], + "type": "text", + "content": "#" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 82, + 79, + 549, + 95 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 82, + 79, + 549, + 95 + ], + "spans": [ + { + "bbox": [ + 82, + 79, + 549, + 95 + ], + "type": "text", + "content": "VDocRAG: Retrieval-Augmented Generation over Visually-Rich Documents" + } + ] + } + ], + "index": 1 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 70, + 94, + 510, + 361 + ], + "blocks": [ + { + "bbox": [ + 72, + 73, + 216, + 84 + ], + "lines": [ + { + "bbox": [ + 72, + 73, + 216, + 84 + ], + "spans": [ + { + "bbox": [ + 72, + 73, + 216, + 84 + ], + "type": "text", + "content": "Multi-hop Question Generation Prompt" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 70, + 94, + 510, + 361 + ], + "lines": [ + { + "bbox": [ + 70, + 94, + 510, + 361 + ], + "spans": [ + { + "bbox": [ + 70, + 94, + 510, + 361 + ], + "type": "text", + "content": "EXAMPLE1: \nquestion1: In which country is the GWP smallest? \nanswer1: Denmark \nquestion2: What is the staple diet of Denmark? \nanswer2: Fish, cheese \ncombined question: What is the staple diet of the country where the GWP is the smallest? \nEXAMPLE2: \nquestion1: To which League does Chicago Cubs belong? \nanswer1: MLB \nquestion2: What is the average MLB team value? \nanswer2: $1.5b \ncombined question: What is the average the league where Chicago Cubs belongs to team value? \nEXAMPLE3 \nquestion1: Which is the capital city of Germany? \nanswer1: Berlin \nquestion2: What year did Berlin host the OKFestival? \nanswer2: It's 2014. \ncombined question: What year did the capital city of Germany host the OKFestival? 
\nBased on the above 3 examples, provide a combined question for the following case, such that the answer to the combined question is the same as the answer2: \nquestion1: {single-hop question} \nanswer1: {single-hop answer} \nquestion2: {single-hop question} \nanswer2: {single-hop answer} \ncombined question:" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "code_body" + } + ], + "index": 1, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "type": "code", + "bbox": [ + 70, + 441, + 490, + 509 + ], + "blocks": [ + { + "bbox": [ + 55, + 383, + 555, + 407 + ], + "lines": [ + { + "bbox": [ + 55, + 383, + 555, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 383, + 555, + 407 + ], + "type": "text", + "content": "Table B. Multi-hop question generation prompt. “{single-hop question}” and “{single-hop answer}” are placeholders of two single-hop questions." + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 72, + 419, + 207, + 431 + ], + "lines": [ + { + "bbox": [ + 72, + 419, + 207, + 431 + ], + "spans": [ + { + "bbox": [ + 72, + 419, + 207, + 431 + ], + "type": "text", + "content": "Multi-hop Question Filtering Prompt" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "code_caption" + }, + { + "bbox": [ + 70, + 441, + 490, + 509 + ], + "lines": [ + { + "bbox": [ + 70, + 441, + 490, + 509 + ], + "spans": [ + { + "bbox": [ + 70, + 441, + 490, + 509 + ], + "type": "text", + "content": "question1: {single-hop question} \nanswer1: {single-hop answer} \nquestion2: {single-hop question} \nanswer2: {single-hop answer} \nBased on the questions and answers above, please answer the following question shortly. If the answer is not identified, the answer is 'None': {multi-hop question}" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "code_body" + }, + { + "bbox": [ + 55, + 531, + 555, + 555 + ], + "lines": [ + { + "bbox": [ + 55, + 531, + 555, + 555 + ], + "spans": [ + { + "bbox": [ + 55, + 531, + 555, + 555 + ], + "type": "text", + "content": "Table C. Multi-hop question filtering prompt. “{single-hop question}” and “{single-hop answer}” are placeholders of two single-hop questions. “{multi-hop question}” denotes the generated multi-hop questions." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_caption" + } + ], + "index": 4, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 55, + 575, + 175, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 575, + 175, + 586 + ], + "spans": [ + { + "bbox": [ + 55, + 575, + 175, + 586 + ], + "type": "text", + "content": "context-dependent questions:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 590, + 295, + 686 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 55, + 590, + 295, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 590, + 295, + 613 + ], + "spans": [ + { + "bbox": [ + 55, + 590, + 295, + 613 + ], + "type": "text", + "content": "- The question has one or more demonstrative pronouns, including \"this\", \"these\", and \"those\"." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 614, + 295, + 638 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 614, + 295, + 638 + ], + "spans": [ + { + "bbox": [ + 55, + 614, + 295, + 638 + ], + "type": "text", + "content": "- The question has one or more personal pronouns, including \"she\", \"he\", \"her\", \"his\", and \"him\"." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 55, + 638, + 295, + 662 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 638, + 295, + 662 + ], + "spans": [ + { + "bbox": [ + 55, + 638, + 295, + 662 + ], + "type": "text", + "content": "- The question has one or more specific keywords, including \"the document\" and \"mention\"." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 662, + 295, + 674 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 662, + 295, + 674 + ], + "spans": [ + { + "bbox": [ + 55, + 662, + 295, + 674 + ], + "type": "text", + "content": "- The question does not contain entities except for numbers." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 55, + 674, + 220, + 686 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 674, + 220, + 686 + ], + "spans": [ + { + "bbox": [ + 55, + 674, + 220, + 686 + ], + "type": "text", + "content": "- The question is shorter than six words." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "spans": [ + { + "bbox": [ + 55, + 689, + 295, + 714 + ], + "type": "text", + "content": "Any samples matching at least one of these rules were removed from our dataset. After applying the rules, we" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 313, + 575, + 555, + 671 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 575, + 555, + 671 + ], + "spans": [ + { + "bbox": [ + 313, + 575, + 555, + 671 + ], + "type": "text", + "content": "manually reviewed all the questions to ensure context-independence, guided by the instruction: \"When you see the question without a given document, can you find a unique document in the corpus to provide a unique answer?\" To validate our review, we randomly sampled 50 questions with their gold and top-5 retrieved documents (from VDocRetriever) and found no ambiguous cases, confirming the high quality of our process." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "spans": [ + { + "bbox": [ + 313, + 689, + 555, + 714 + ], + "type": "text", + "content": "Prompts for creating multi-hop questions. Table B shows the prompt for combining two single-hop questions" + } + ] + } + ], + "index": 15 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 116, + 70, + 494, + 191 + ], + "blocks": [ + { + "bbox": [ + 116, + 70, + 494, + 191 + ], + "lines": [ + { + "bbox": [ + 116, + 70, + 494, + 191 + ], + "spans": [ + { + "bbox": [ + 116, + 70, + 494, + 191 + ], + "type": "table", + "html": "
Dataset | Task Description
DocVQA | You have to find an industry document that answers my question.
InfoVQA | Given a question, retrieve an infographic to answer the question.
VisualMRC | I'm looking for a screenshot image that answers the question.
ChartQA | Given a user query, retrieve a chart image that answers the query.
OpenWikiTable | Given a user query, retrieve a table image for answering the question.
DUDE | You need to retrieve evidence from a PDF page to address the question.
MPMQA | I want to know the answer to the question. Can you find evidence from manual pages?
SlideVQA | Given a question, retrieve a slide image to answer the question.
MHDocVQA | Given a multihop-question, retrieve multiple pages that can help answer the question.
", + "image_path": "44bd8baf3943678355dc4b467e0811d9b27915bb0fc9814308e0a335c81d698f.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 61, + 230, + 291, + 349 + ], + "blocks": [ + { + "bbox": [ + 198, + 200, + 411, + 210 + ], + "lines": [ + { + "bbox": [ + 198, + 200, + 411, + 210 + ], + "spans": [ + { + "bbox": [ + 198, + 200, + 411, + 210 + ], + "type": "text", + "content": "Table D. Instructions in the visual document retrieval task." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 61, + 230, + 291, + 349 + ], + "lines": [ + { + "bbox": [ + 61, + 230, + 291, + 349 + ], + "spans": [ + { + "bbox": [ + 61, + 230, + 291, + 349 + ], + "type": "table", + "html": "
Model | Model Checkpoint
Contriever | facebook/contriever-msmarco
E5 | intfloat/e5-base-v2
GTE | thenlper/gte-base
E5-Mistral | intfloat/e5-mistral-7b-instruct
NV-Embed-v2 | nvidia/NV-Embed-v2
CLIP | openai/clip-vit-large-patch14-336
DSE | Tevatron/dse-phi3-docmatix-v1
VisRAG-Ret | openbmb/VisRAG-Ret
Phi3V | microsoft/Phi-3-vision-128k-instruct
Idefics3 | HuggingFaceM4/Idefics3-8B-Llama3
", + "image_path": "a670a379606925aebe13fb798758eded19703da2df393c27de2a6123d0706960.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "type": "table", + "bbox": [ + 111, + 378, + 241, + 491 + ], + "blocks": [ + { + "bbox": [ + 79, + 357, + 271, + 369 + ], + "lines": [ + { + "bbox": [ + 79, + 357, + 271, + 369 + ], + "spans": [ + { + "bbox": [ + 79, + 357, + 271, + 369 + ], + "type": "text", + "content": "Table E. Model checkpoints stored on HuggingFace." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 111, + 378, + 241, + 491 + ], + "lines": [ + { + "bbox": [ + 111, + 378, + 241, + 491 + ], + "spans": [ + { + "bbox": [ + 111, + 378, + 241, + 491 + ], + "type": "table", + "html": "
Hyperparameters | Value
Learning Rate | 1e-4
Gradient Accumulation | 4
AdamW β1 | 0.9
AdamW β2 | 0.999
LoRA Attention Dimension r | 8
LoRA Scaling Alpha | 64
LoRA Dropout | 0.1
LoRA Target | *.proj
BF16 | True
", + "image_path": "5090daed0843b128ed56ee5afc1f886125beab326b643bf72d7df2aaafa12051.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "table_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 55, + 530, + 295, + 556 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 530, + 295, + 556 + ], + "spans": [ + { + "bbox": [ + 55, + 530, + 295, + 556 + ], + "type": "text", + "content": "to generate multi-hop questions. Moreover, Table C shows the prompt for filtering the generated multi-hop questions." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 55, + 565, + 181, + 578 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 565, + 181, + 578 + ], + "spans": [ + { + "bbox": [ + 55, + 565, + 181, + 578 + ], + "type": "text", + "content": "B. Experimental Details" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 55, + 586, + 295, + 633 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 586, + 295, + 633 + ], + "spans": [ + { + "bbox": [ + 55, + 586, + 295, + 633 + ], + "type": "text", + "content": "Instruction templates. Following a standard LLM-based retrieval training and evaluation strategy [60], we applied natural language instruction templates to the original question for the visual document retrieval task:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 73, + 643, + 277, + 657 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 643, + 277, + 657 + ], + "spans": [ + { + "bbox": [ + 73, + 643, + 277, + 657 + ], + "type": "text", + "content": "Instruct: {task description} \\n Query: {question}," + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "spans": [ + { + "bbox": [ + 55, + 665, + 296, + 715 + ], + "type": "text", + "content": "where “{task description}” is a placeholder for a one-sentence task description as shown in Table D. Note that the instruction format was applied to only LLM-based retrievers, including E5-Mistral [60], NV-Embed-v2 [30]," + } + ] + } + ], + "index": 10 + }, + { + "type": "table", + "bbox": [ + 317, + 230, + 553, + 295 + ], + "blocks": [ + { + "bbox": [ + 59, + 498, + 290, + 510 + ], + "lines": [ + { + "bbox": [ + 59, + 498, + 290, + 510 + ], + "spans": [ + { + "bbox": [ + 59, + 498, + 290, + 510 + ], + "type": "text", + "content": "Table F. Hyperparameters used for pre-training and fine-tuning." + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 317, + 230, + 553, + 295 + ], + "lines": [ + { + "bbox": [ + 317, + 230, + 553, + 295 + ], + "spans": [ + { + "bbox": [ + 317, + 230, + 553, + 295 + ], + "type": "table", + "html": "
Max Image Resolution | Retrieval nDCG@5 | Encoding Time (ms) | QA ANLS | Generation Time (ms)
336×336 | 28.7 | 85.0 | 37.2 | 394.5
672×672 | 72.8 | 106.4 | 42.7 | 490.9
1344×1344 | 72.9 | 204.4 | 56.2 | 789.7
", + "image_path": "2c78af87a7f5b90dd0fafe9014a23fa580f89c137511a187dc4e21ee04dd4a3a.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "table_body" + } + ], + "index": 11 + }, + { + "bbox": [ + 313, + 303, + 555, + 338 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 303, + 555, + 338 + ], + "spans": [ + { + "bbox": [ + 313, + 303, + 555, + 338 + ], + "type": "text", + "content": "Table G. Impact of image resolution on InfoVQA under the single-pool setting. Average time (ms) to encode a single document or generate a single answer is measured on a single A100 GPU." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 313, + 357, + 556, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 357, + 556, + 467 + ], + "spans": [ + { + "bbox": [ + 313, + 357, + 556, + 467 + ], + "type": "text", + "content": "DSE [37], Phi3 [1], and VDocRetriever. Our preliminary experiments observed that using the instruction during both training and evaluation improved the performance of LLM-based retrievers. However, applying the same instruction format to non-LLM-based retrievers, such as Contriever [22], resulted in a performance decline due to lacking instruction-following capabilities. Furthermore, we appended an instruction regarding the desired output format for the DocumentVQA task:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 395, + 475, + 473, + 489 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 395, + 475, + 473, + 489 + ], + "spans": [ + { + "bbox": [ + 395, + 475, + 473, + 489 + ], + "type": "inline_equation", + "content": "\\backslash" + }, + { + "bbox": [ + 395, + 475, + 473, + 489 + ], + "type": "text", + "content": " n Answer briefly." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 313, + 503, + 555, + 529 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 503, + 555, + 529 + ], + "spans": [ + { + "bbox": [ + 313, + 503, + 555, + 529 + ], + "type": "text", + "content": "Model checkpoints Table E shows model initialization checkpoints stored on HuggingFace1." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 313, + 543, + 554, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 543, + 554, + 567 + ], + "spans": [ + { + "bbox": [ + 313, + 543, + 554, + 567 + ], + "type": "text", + "content": "Model hyperparameters Table F lists hyperparameters in pre-training and fine-tuning used for our models." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 313, + 578, + 506, + 593 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 578, + 506, + 593 + ], + "spans": [ + { + "bbox": [ + 313, + 578, + 506, + 593 + ], + "type": "text", + "content": "C. Additional Experimental Analysis" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 313, + 598, + 555, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 598, + 555, + 696 + ], + "spans": [ + { + "bbox": [ + 313, + 598, + 555, + 696 + ], + "type": "text", + "content": "How does image resolution impact performance? Table G shows that increasing image resolution improved the model's capability to understand and encode the document; however, it also significantly increased the inference time for both retrieval and QA tasks. Moreover, the performance in the QA task exhibited greater sensitivity to image resolution compared to the retrieval task, indicating that the QA task demands more detailed visual understanding." 
+ } + ] + } + ], + "index": 18 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 325, + 702, + 438, + 713 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 325, + 702, + 438, + 713 + ], + "spans": [ + { + "bbox": [ + 325, + 702, + 438, + 713 + ], + "type": "text", + "content": "1https://huggingface.co" + } + ] + } + ], + "index": 19 + } + ], + "page_size": [ + 612, + 792 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 63, + 71, + 289, + 184 + ], + "blocks": [ + { + "bbox": [ + 63, + 71, + 289, + 184 + ], + "lines": [ + { + "bbox": [ + 63, + 71, + 289, + 184 + ], + "spans": [ + { + "bbox": [ + 63, + 71, + 289, + 184 + ], + "type": "image", + "image_path": "c3e1f531e3d74c264ced8dd2963260bf24946c515cb9e7bea31a89d5669a150c.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 56, + 193, + 295, + 216 + ], + "lines": [ + { + "bbox": [ + 56, + 193, + 295, + 216 + ], + "spans": [ + { + "bbox": [ + 56, + 193, + 295, + 216 + ], + "type": "text", + "content": "Figure C. QA performance with various top-k on InfoVQA under the single-pool setting. () denotes document sources." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + } + ], + "index": 0 + }, + { + "bbox": [ + 55, + 236, + 295, + 308 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 236, + 295, + 308 + ], + "spans": [ + { + "bbox": [ + 55, + 236, + 295, + 308 + ], + "type": "text", + "content": "How many retrieved documents to augment? Figure C shows that incorporating three documents yielded the best results in VDocRAG. While adding a few documents may include helpful contexts, adding more low-ranked or randomly sampled documents introduces noise and deteriorates generation due to the imperfections of retrievers." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 55, + 323, + 295, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 55, + 323, + 295, + 407 + ], + "spans": [ + { + "bbox": [ + 55, + 323, + 295, + 407 + ], + "type": "text", + "content": "Additional qualitative results. Figure D shows qualitative results of VDocRAG compared to text-based RAG. VDocRAG demonstrates significant performance advantages in understanding layouts and visual content, such as tables, charts, figures, and diagrams. These findings highlight the critical role of representing documents as images to improve the performance of the RAG framework." + } + ] + } + ], + "index": 3 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "bbox": [ + 228, + 121, + 277, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 228, + 121, + 277, + 129 + ], + "spans": [ + { + "bbox": [ + 228, + 121, + 277, + 129 + ], + "type": "text", + "content": "VDocRetriever" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 66, + 140, + 161, + 156 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 140, + 161, + 156 + ], + "spans": [ + { + "bbox": [ + 66, + 140, + 161, + 156 + ], + "type": "text", + "content": "How many apps does the company which makes Clash of Clans make?" 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 174, + 116, + 182 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 174, + 116, + 182 + ], + "spans": [ + { + "bbox": [ + 69, + 174, + 116, + 182 + ], + "type": "text", + "content": "Ground-truth: 7" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 189, + 127, + 197 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 189, + 127, + 197 + ], + "spans": [ + { + "bbox": [ + 69, + 189, + 127, + 197 + ], + "type": "text", + "content": "Text-based RAG: 61" + } + ] + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 135, + 187, + 145, + 196 + ], + "blocks": [ + { + "bbox": [ + 135, + 187, + 145, + 196 + ], + "lines": [ + { + "bbox": [ + 135, + 187, + 145, + 196 + ], + "spans": [ + { + "bbox": [ + 135, + 187, + 145, + 196 + ], + "type": "image", + "image_path": "f896e0eb27f884677490aa86ef18df4953d23755fa8e44a9ccdc08bc604d0195.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 201, + 108, + 210 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 201, + 108, + 210 + ], + "spans": [ + { + "bbox": [ + 69, + 201, + 108, + 210 + ], + "type": "text", + "content": "VDocRAG: 7" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 135, + 201, + 145, + 211 + ], + "blocks": [ + { + "bbox": [ + 135, + 201, + 145, + 211 + ], + "lines": [ + { + "bbox": [ + 135, + 201, + 145, + 211 + ], + "spans": [ + { + "bbox": [ + 135, + 201, + 145, + 211 + ], + "type": "image", + "image_path": "8fda62ac62030b06a549a2ac78a33236c21b558754f50098b167c4b3d99054c7.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 162, + 150, + 245, + 216 + ], + "blocks": [ + { + "bbox": [ + 194, + 139, + 220, + 148 + ], + "lines": [ + { + "bbox": [ + 194, + 139, + 220, + 148 + ], + "spans": [ + { + "bbox": [ + 194, + 139, + 220, + 148 + ], + "type": "text", + "content": "Top1" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 162, + 150, + 245, + 216 + ], + "lines": [ + { + "bbox": [ + 162, + 150, + 245, + 216 + ], + "spans": [ + { + "bbox": [ + 162, + 150, + 245, + 216 + ], + "type": "image", + "image_path": "02e19ae4e950e2108d7311580eeef92b303a0b15c955b13f5bb0d1de7673722c.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 253, + 150, + 342, + 217 + ], + "blocks": [ + { + "bbox": [ + 286, + 139, + 314, + 148 + ], + "lines": [ + { + "bbox": [ + 286, + 139, + 314, + 148 + ], + "spans": [ + { + "bbox": [ + 286, + 139, + 314, + 148 + ], + "type": "text", + "content": "Top2" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 253, + 150, + 342, + 217 + ], + "lines": [ + { + "bbox": [ + 253, + 150, + 342, + 217 + ], + "spans": [ + { + "bbox": [ + 253, + 150, + 342, + 217 + ], + "type": "image", + "image_path": "fd6d7dd7029c1a3057e13b3b9406c6fc9423887c3cc2e59a07e224f78adf5288.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + } + ], + "index": 10 + }, + { + "bbox": [ + 416, + 120, + 482, + 129 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 416, + 120, + 482, + 129 + ], + "spans": [ + { + "bbox": [ + 416, + 120, + 482, + 129 + ], + "type": "text", + "content": "Text-based Retriever" + } + ] + } + ], + "index": 11 + }, + 
{ + "bbox": [ + 392, + 139, + 405, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 392, + 139, + 405, + 148 + ], + "spans": [ + { + "bbox": [ + 392, + 139, + 405, + 148 + ], + "type": "text", + "content": "Top1" + } + ] + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 408, + 139, + 417, + 148 + ], + "blocks": [ + { + "bbox": [ + 408, + 139, + 417, + 148 + ], + "lines": [ + { + "bbox": [ + 408, + 139, + 417, + 148 + ], + "spans": [ + { + "bbox": [ + 408, + 139, + 417, + 148 + ], + "type": "image", + "image_path": "4b0b054b065675e8636c73ddf038537162d7f7281b521ad2886aeb9652aafcca.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 361, + 149, + 445, + 215 + ], + "blocks": [ + { + "bbox": [ + 361, + 149, + 445, + 215 + ], + "lines": [ + { + "bbox": [ + 361, + 149, + 445, + 215 + ], + "spans": [ + { + "bbox": [ + 361, + 149, + 445, + 215 + ], + "type": "image", + "image_path": "b0bd7c2f12f9f023e7bf5933be336cfa80c05bb664a078dd8f3292b12b05d72a.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "bbox": [ + 484, + 139, + 497, + 148 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 484, + 139, + 497, + 148 + ], + "spans": [ + { + "bbox": [ + 484, + 139, + 497, + 148 + ], + "type": "text", + "content": "Top2" + } + ] + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 499, + 139, + 511, + 148 + ], + "blocks": [ + { + "bbox": [ + 499, + 139, + 511, + 148 + ], + "lines": [ + { + "bbox": [ + 499, + 139, + 511, + 148 + ], + "spans": [ + { + "bbox": [ + 499, + 139, + 511, + 148 + ], + "type": "image", + "image_path": "10b565d1b28db9a28db2f8edc5bebe2760ff7ad373804320019c8e37cb788e11.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "bbox": [ + 468, + 155, + 529, + 163 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 468, + 155, + 529, + 163 + ], + "spans": [ + { + "bbox": [ + 468, + 155, + 529, + 163 + ], + "type": "text", + "content": "Top Free iOS App Earners" + } + ] + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 469, + 164, + 544, + 215 + ], + "blocks": [ + { + "bbox": [ + 469, + 164, + 544, + 215 + ], + "lines": [ + { + "bbox": [ + 469, + 164, + 544, + 215 + ], + "spans": [ + { + "bbox": [ + 469, + 164, + 544, + 215 + ], + "type": "image", + "image_path": "60d6ea6ed65da4afd72cffe5fb60f023b4f9af87271ab1f9836404bcc380b616.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 243, + 153, + 267 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 243, + 153, + 267 + ], + "spans": [ + { + "bbox": [ + 67, + 243, + 153, + 267 + ], + "type": "text", + "content": "What is the Stream Source for the API which uses Java, Scala, and Python?" 
+ } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 71, + 276, + 159, + 285 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 276, + 159, + 285 + ], + "spans": [ + { + "bbox": [ + 71, + 276, + 159, + 285 + ], + "type": "text", + "content": "Ground-truth: HDFS, Network" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 71, + 291, + 135, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 291, + 135, + 300 + ], + "spans": [ + { + "bbox": [ + 71, + 291, + 135, + 300 + ], + "type": "text", + "content": "Text-based RAG: Fink" + } + ] + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 148, + 291, + 159, + 300 + ], + "blocks": [ + { + "bbox": [ + 148, + 291, + 159, + 300 + ], + "lines": [ + { + "bbox": [ + 148, + 291, + 159, + 300 + ], + "spans": [ + { + "bbox": [ + 148, + 291, + 159, + 300 + ], + "type": "image", + "image_path": "98ad0e6fc57ecfc3f03f5748718f4eb42c2e8f72d2583e53a9f04f4ec36cceff.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "bbox": [ + 71, + 304, + 149, + 312 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 304, + 149, + 312 + ], + "spans": [ + { + "bbox": [ + 71, + 304, + 149, + 312 + ], + "type": "text", + "content": "VDocRAG: HDFS, Network" + } + ] + } + ], + "index": 23 + }, + { + "type": "image", + "bbox": [ + 162, + 251, + 256, + 323 + ], + "blocks": [ + { + "bbox": [ + 201, + 239, + 227, + 248 + ], + "lines": [ + { + "bbox": [ + 201, + 239, + 227, + 248 + ], + "spans": [ + { + "bbox": [ + 201, + 239, + 227, + 248 + ], + "type": "text", + "content": "Top1" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 162, + 251, + 256, + 323 + ], + "lines": [ + { + "bbox": [ + 162, + 251, + 256, + 323 + ], + "spans": [ + { + "bbox": [ + 162, + 251, + 256, + 323 + ], + "type": "image", + "image_path": "7c3dc62a709a7e1b74f98d1eda8a376a4f7c332b6cdeb2a3d34e613e614345b9.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 257, + 251, + 348, + 323 + ], + "blocks": [ + { + "bbox": [ + 296, + 239, + 323, + 248 + ], + "lines": [ + { + "bbox": [ + 296, + 239, + 323, + 248 + ], + "spans": [ + { + "bbox": [ + 296, + 239, + 323, + 248 + ], + "type": "text", + "content": "Top2" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 257, + 251, + 348, + 323 + ], + "lines": [ + { + "bbox": [ + 257, + 251, + 348, + 323 + ], + "spans": [ + { + "bbox": [ + 257, + 251, + 348, + 323 + ], + "type": "image", + "image_path": "38ac7060dba69efc014f7182af952de71ee03e6a3f3ae00d2e81ed36c512e0ad.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 412, + 238, + 424, + 247 + ], + "blocks": [ + { + "bbox": [ + 412, + 238, + 424, + 247 + ], + "lines": [ + { + "bbox": [ + 412, + 238, + 424, + 247 + ], + "spans": [ + { + "bbox": [ + 412, + 238, + 424, + 247 + ], + "type": "image", + "image_path": "36edfad1b65572802a919a3b8ea162fe7b27fd4468e7ccac548538547b9904d2.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 488, + 238, + 503, + 247 + ], + "lines": [ + { + "bbox": [ + 488, + 238, + 503, + 247 + ], + "spans": [ + { + "bbox": [ + 488, + 238, + 503, + 247 + ], + "type": "text", + "content": "Top2" + } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" 
+ } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 360, + 249, + 449, + 320 + ], + "blocks": [ + { + "bbox": [ + 397, + 238, + 411, + 247 + ], + "lines": [ + { + "bbox": [ + 397, + 238, + 411, + 247 + ], + "spans": [ + { + "bbox": [ + 397, + 238, + 411, + 247 + ], + "type": "text", + "content": "Top1" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 360, + 249, + 449, + 320 + ], + "lines": [ + { + "bbox": [ + 360, + 249, + 449, + 320 + ], + "spans": [ + { + "bbox": [ + 360, + 249, + 449, + 320 + ], + "type": "image", + "image_path": "8fd4c6619a027e8b10e7f28376bbaad6aa53a02853b4a8d1c27d64b6a014d0e8.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "bbox": [ + 468, + 255, + 531, + 261 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 468, + 255, + 531, + 261 + ], + "spans": [ + { + "bbox": [ + 468, + 255, + 531, + 261 + ], + "type": "text", + "content": "The Reactive Streams Initiative" + } + ] + } + ], + "index": 32 + }, + { + "bbox": [ + 458, + 262, + 544, + 315 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 458, + 262, + 544, + 315 + ], + "spans": [ + { + "bbox": [ + 458, + 262, + 544, + 315 + ], + "type": "text", + "content": "Reactive Streams is an initiative to provide a standard for asynchronous stream processing with non-blocking back pressure on the JVM \nProblem Scope \nHandling streams of (live) data in an asynchronous and possibly non-blocking way Finding a minimal API describing the operations available on Reactive Streams \nImplementers \nRxlava \nAkka Streams \nReactor Composable Ratpack" + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 70, + 351, + 148, + 368 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 351, + 148, + 368 + ], + "spans": [ + { + "bbox": [ + 70, + 351, + 148, + 368 + ], + "type": "text", + "content": "Which is Microsoft's biggest acquisition to date?" 
+ } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 73, + 385, + 134, + 394 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 385, + 134, + 394 + ], + "spans": [ + { + "bbox": [ + 73, + 385, + 134, + 394 + ], + "type": "text", + "content": "Ground-truth: Skype" + } + ] + } + ], + "index": 35 + }, + { + "bbox": [ + 74, + 399, + 143, + 407 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 399, + 143, + 407 + ], + "spans": [ + { + "bbox": [ + 74, + 399, + 143, + 407 + ], + "type": "text", + "content": "Text-based RAG: Oculus" + } + ] + } + ], + "index": 36 + }, + { + "type": "image", + "bbox": [ + 145, + 399, + 155, + 408 + ], + "blocks": [ + { + "bbox": [ + 145, + 399, + 155, + 408 + ], + "lines": [ + { + "bbox": [ + 145, + 399, + 155, + 408 + ], + "spans": [ + { + "bbox": [ + 145, + 399, + 155, + 408 + ], + "type": "image", + "image_path": "bc1de121b83276858f23fa75aa8f727e619999d63b1fc7668888226d99108051.jpg" + } + ] + } + ], + "index": 37, + "angle": 0, + "type": "image_body" + } + ], + "index": 37 + }, + { + "bbox": [ + 74, + 413, + 126, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 413, + 126, + 422 + ], + "spans": [ + { + "bbox": [ + 74, + 413, + 126, + 422 + ], + "type": "text", + "content": "VDocRAG: Skype" + } + ] + } + ], + "index": 38 + }, + { + "type": "image", + "bbox": [ + 145, + 412, + 156, + 421 + ], + "blocks": [ + { + "bbox": [ + 145, + 412, + 156, + 421 + ], + "lines": [ + { + "bbox": [ + 145, + 412, + 156, + 421 + ], + "spans": [ + { + "bbox": [ + 145, + 412, + 156, + 421 + ], + "type": "image", + "image_path": "c9b9fa6f8fb4c7743af1f2b25068bbfacddcdca27874fb27b620b98c9518579a.jpg" + } + ] + } + ], + "index": 39, + "angle": 0, + "type": "image_body" + } + ], + "index": 39 + }, + { + "type": "image", + "bbox": [ + 174, + 357, + 263, + 426 + ], + "blocks": [ + { + "bbox": [ + 233, + 346, + 260, + 356 + ], + "lines": [ + { + "bbox": [ + 233, + 346, + 260, + 356 + ], + "spans": [ + { + "bbox": [ + 233, + 346, + 260, + 356 + ], + "type": "text", + "content": "Top1" + } + ] + } + ], + "index": 40, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 174, + 357, + 263, + 426 + ], + "lines": [ + { + "bbox": [ + 174, + 357, + 263, + 426 + ], + "spans": [ + { + "bbox": [ + 174, + 357, + 263, + 426 + ], + "type": "image", + "image_path": "46384f36ba5e958631e5a95913c6a969f4434967e9130b3df130d48bf095fd9e.jpg" + } + ] + } + ], + "index": 41, + "angle": 0, + "type": "image_body" + } + ], + "index": 41 + }, + { + "type": "image", + "bbox": [ + 290, + 356, + 332, + 426 + ], + "blocks": [ + { + "bbox": [ + 290, + 356, + 332, + 426 + ], + "lines": [ + { + "bbox": [ + 290, + 356, + 332, + 426 + ], + "spans": [ + { + "bbox": [ + 290, + 356, + 332, + 426 + ], + "type": "image", + "image_path": "3db7ae0cbb6aa7ec0887e99eca957ba1d4f7a591fab2c412bd04ff5d7c27e484.jpg" + } + ] + } + ], + "index": 42, + "angle": 0, + "type": "image_body" + } + ], + "index": 42 + }, + { + "bbox": [ + 363, + 359, + 443, + 422 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 363, + 359, + 443, + 422 + ], + "spans": [ + { + "bbox": [ + 363, + 359, + 443, + 422 + ], + "type": "text", + "content": "Increasing number of prominent successful exits\n• Oracle TRUE2014\n• Acquired (Prestige) IPO Acquired (Oracle)\n• Acquired (Open) IPO\n• Gravity baily inktank + CDO\n• Acquired (AOL) Acquired (Apple) Acquired (Red Hat) Acquired (Open) Acquired (Oracle)\n• $8B+ in 2014 so far with more to come\nupfront" + } + ] + } + ], 
+ "index": 44 + }, + { + "type": "image", + "bbox": [ + 506, + 346, + 518, + 355 + ], + "blocks": [ + { + "bbox": [ + 490, + 346, + 505, + 355 + ], + "lines": [ + { + "bbox": [ + 490, + 346, + 505, + 355 + ], + "spans": [ + { + "bbox": [ + 490, + 346, + 505, + 355 + ], + "type": "text", + "content": "Top2" + } + ] + } + ], + "index": 45, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 506, + 346, + 518, + 355 + ], + "lines": [ + { + "bbox": [ + 506, + 346, + 518, + 355 + ], + "spans": [ + { + "bbox": [ + 506, + 346, + 518, + 355 + ], + "type": "image", + "image_path": "ae6693ffead482e8053a2b2768b7491e91c23a4e6e1efa26e846487f7a2cfedf.jpg" + } + ] + } + ], + "index": 46, + "angle": 0, + "type": "image_body" + } + ], + "index": 46 + }, + { + "type": "image", + "bbox": [ + 452, + 357, + 547, + 422 + ], + "blocks": [ + { + "bbox": [ + 399, + 346, + 411, + 355 + ], + "lines": [ + { + "bbox": [ + 399, + 346, + 411, + 355 + ], + "spans": [ + { + "bbox": [ + 399, + 346, + 411, + 355 + ], + "type": "text", + "content": "Top1" + } + ] + } + ], + "index": 43, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 452, + 357, + 547, + 422 + ], + "lines": [ + { + "bbox": [ + 452, + 357, + 547, + 422 + ], + "spans": [ + { + "bbox": [ + 452, + 357, + 547, + 422 + ], + "type": "image", + "image_path": "8bf73301f72ebf4656f82cc481c15fbde6f61a8740c0b33b1c37027069fd9381.jpg" + } + ] + } + ], + "index": 47, + "angle": 0, + "type": "image_body" + } + ], + "index": 47 + }, + { + "bbox": [ + 70, + 454, + 157, + 471 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 454, + 157, + 471 + ], + "spans": [ + { + "bbox": [ + 70, + 454, + 157, + 471 + ], + "type": "text", + "content": "How many layers are used in the gloves for the DPE suit?" 
+ } + ] + } + ], + "index": 48 + }, + { + "bbox": [ + 71, + 488, + 130, + 496 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 488, + 130, + 496 + ], + "spans": [ + { + "bbox": [ + 71, + 488, + 130, + 496 + ], + "type": "text", + "content": "Ground-truth: Three" + } + ] + } + ], + "index": 49 + }, + { + "bbox": [ + 71, + 502, + 135, + 511 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 502, + 135, + 511 + ], + "spans": [ + { + "bbox": [ + 71, + 502, + 135, + 511 + ], + "type": "text", + "content": "Text-based RAG: Two" + } + ] + } + ], + "index": 50 + }, + { + "type": "image", + "bbox": [ + 143, + 502, + 153, + 511 + ], + "blocks": [ + { + "bbox": [ + 143, + 502, + 153, + 511 + ], + "lines": [ + { + "bbox": [ + 143, + 502, + 153, + 511 + ], + "spans": [ + { + "bbox": [ + 143, + 502, + 153, + 511 + ], + "type": "image", + "image_path": "92b6df9e1b061c1adc5993c1f56e1cf0ed8cef4a78b71fab5fe1c20b06c29570.jpg" + } + ] + } + ], + "index": 51, + "angle": 0, + "type": "image_body" + } + ], + "index": 51 + }, + { + "bbox": [ + 71, + 516, + 122, + 524 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 71, + 516, + 122, + 524 + ], + "spans": [ + { + "bbox": [ + 71, + 516, + 122, + 524 + ], + "type": "text", + "content": "VDocRAG: Three" + } + ] + } + ], + "index": 52 + }, + { + "type": "image", + "bbox": [ + 143, + 516, + 153, + 524 + ], + "blocks": [ + { + "bbox": [ + 143, + 516, + 153, + 524 + ], + "lines": [ + { + "bbox": [ + 143, + 516, + 153, + 524 + ], + "spans": [ + { + "bbox": [ + 143, + 516, + 153, + 524 + ], + "type": "image", + "image_path": "9092ea0c400936ff15a858f0a195e9d0777cd251401800d6667b66fc6f19e152.jpg" + } + ] + } + ], + "index": 53, + "angle": 0, + "type": "image_body" + } + ], + "index": 53 + }, + { + "type": "image", + "bbox": [ + 184, + 459, + 340, + 527 + ], + "blocks": [ + { + "bbox": [ + 231, + 449, + 257, + 458 + ], + "lines": [ + { + "bbox": [ + 231, + 449, + 257, + 458 + ], + "spans": [ + { + "bbox": [ + 231, + 449, + 257, + 458 + ], + "type": "text", + "content": "Top1" + } + ] + } + ], + "index": 54, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 184, + 459, + 340, + 527 + ], + "lines": [ + { + "bbox": [ + 184, + 459, + 340, + 527 + ], + "spans": [ + { + "bbox": [ + 184, + 459, + 340, + 527 + ], + "type": "image", + "image_path": "f449f5484e1694c7bfff1780be558bd32c7a6c684713be27c6d42f4267bddc73.jpg" + } + ] + } + ], + "index": 55, + "angle": 0, + "type": "image_body" + } + ], + "index": 55 + }, + { + "type": "image", + "bbox": [ + 358, + 459, + 447, + 532 + ], + "blocks": [ + { + "bbox": [ + 397, + 449, + 408, + 458 + ], + "lines": [ + { + "bbox": [ + 397, + 449, + 408, + 458 + ], + "spans": [ + { + "bbox": [ + 397, + 449, + 408, + 458 + ], + "type": "text", + "content": "Top" + } + ] + } + ], + "index": 56, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 358, + 459, + 447, + 532 + ], + "lines": [ + { + "bbox": [ + 358, + 459, + 447, + 532 + ], + "spans": [ + { + "bbox": [ + 358, + 459, + 447, + 532 + ], + "type": "image", + "image_path": "97cc883868a778e50a29344b7815e08f67bf0882448e4cda19b58ca462572f5b.jpg" + } + ] + } + ], + "index": 57, + "angle": 0, + "type": "image_body" + } + ], + "index": 57 + }, + { + "type": "image", + "bbox": [ + 453, + 459, + 545, + 532 + ], + "blocks": [ + { + "bbox": [ + 488, + 449, + 501, + 458 + ], + "lines": [ + { + "bbox": [ + 488, + 449, + 501, + 458 + ], + "spans": [ + { + "bbox": [ + 488, + 449, + 501, + 458 + ], + "type": "text", + 
"content": "Top2" + } + ] + } + ], + "index": 58, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 453, + 459, + 545, + 532 + ], + "lines": [ + { + "bbox": [ + 453, + 459, + 545, + 532 + ], + "spans": [ + { + "bbox": [ + 453, + 459, + 545, + 532 + ], + "type": "image", + "image_path": "e63e20d4fea3336cf279de0bd01cde6d0aefe471c63e1b34f107f84be90e7006.jpg" + } + ] + } + ], + "index": 59, + "angle": 0, + "type": "image_body" + } + ], + "index": 59 + }, + { + "bbox": [ + 70, + 561, + 137, + 577 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 561, + 137, + 577 + ], + "spans": [ + { + "bbox": [ + 70, + 561, + 137, + 577 + ], + "type": "text", + "content": "What is the phase before full moon?" + } + ] + } + ], + "index": 60 + }, + { + "bbox": [ + 72, + 592, + 161, + 601 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 72, + 592, + 161, + 601 + ], + "spans": [ + { + "bbox": [ + 72, + 592, + 161, + 601 + ], + "type": "text", + "content": "Ground-truth: Waxing Gibbous" + } + ] + } + ], + "index": 61 + }, + { + "bbox": [ + 73, + 606, + 149, + 615 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 606, + 149, + 615 + ], + "spans": [ + { + "bbox": [ + 73, + 606, + 149, + 615 + ], + "type": "text", + "content": "Text-based RAG: New Mod" + } + ] + } + ], + "index": 62 + }, + { + "bbox": [ + 73, + 620, + 150, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 73, + 620, + 150, + 628 + ], + "spans": [ + { + "bbox": [ + 73, + 620, + 150, + 628 + ], + "type": "text", + "content": "VDocRAG: Waxing Gibbous" + } + ] + } + ], + "index": 63 + }, + { + "type": "image", + "bbox": [ + 175, + 567, + 220, + 630 + ], + "blocks": [ + { + "bbox": [ + 175, + 567, + 220, + 630 + ], + "lines": [ + { + "bbox": [ + 175, + 567, + 220, + 630 + ], + "spans": [ + { + "bbox": [ + 175, + 567, + 220, + 630 + ], + "type": "image", + "image_path": "60fe3beaaf4572fd3ce80a4927146705e8c482da349ae6b853c10beb2ba62fce.jpg" + } + ] + } + ], + "index": 65, + "angle": 0, + "type": "image_body" + } + ], + "index": 65 + }, + { + "type": "image", + "bbox": [ + 226, + 567, + 345, + 630 + ], + "blocks": [ + { + "bbox": [ + 233, + 553, + 258, + 563 + ], + "lines": [ + { + "bbox": [ + 233, + 553, + 258, + 563 + ], + "spans": [ + { + "bbox": [ + 233, + 553, + 258, + 563 + ], + "type": "text", + "content": "Top1" + } + ] + } + ], + "index": 64, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 226, + 567, + 345, + 630 + ], + "lines": [ + { + "bbox": [ + 226, + 567, + 345, + 630 + ], + "spans": [ + { + "bbox": [ + 226, + 567, + 345, + 630 + ], + "type": "image", + "image_path": "07854eb43642dc45afdf6f77025fe8200fed503d7127377a87544536c2e088d9.jpg" + } + ] + } + ], + "index": 66, + "angle": 0, + "type": "image_body" + } + ], + "index": 66 + }, + { + "type": "image", + "bbox": [ + 358, + 567, + 444, + 628 + ], + "blocks": [ + { + "bbox": [ + 398, + 553, + 426, + 563 + ], + "lines": [ + { + "bbox": [ + 398, + 553, + 426, + 563 + ], + "spans": [ + { + "bbox": [ + 398, + 553, + 426, + 563 + ], + "type": "text", + "content": "Top1" + } + ] + } + ], + "index": 67, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 358, + 567, + 444, + 628 + ], + "lines": [ + { + "bbox": [ + 358, + 567, + 444, + 628 + ], + "spans": [ + { + "bbox": [ + 358, + 567, + 444, + 628 + ], + "type": "image", + "image_path": "0489f0c8f2a8e8055c63cf3cf762948906913c7dd5adb78e0b699b9d12492872.jpg" + } + ] + } + ], + "index": 68, + "angle": 0, + "type": 
"image_body" + } + ], + "index": 68 + }, + { + "type": "image", + "bbox": [ + 447, + 567, + 493, + 631 + ], + "blocks": [ + { + "bbox": [ + 447, + 567, + 493, + 631 + ], + "lines": [ + { + "bbox": [ + 447, + 567, + 493, + 631 + ], + "spans": [ + { + "bbox": [ + 447, + 567, + 493, + 631 + ], + "type": "image", + "image_path": "0b86f9b43a10325329ad373f38c304d5f6d962aa2c5f295b4d7af1e06b4229c9.jpg" + } + ] + } + ], + "index": 70, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 495, + 552, + 518, + 562 + ], + "lines": [ + { + "bbox": [ + 495, + 552, + 518, + 562 + ], + "spans": [ + { + "bbox": [ + 495, + 552, + 518, + 562 + ], + "type": "text", + "content": "op2" + } + ] + } + ], + "index": 71, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 151, + 654, + 458, + 666 + ], + "lines": [ + { + "bbox": [ + 151, + 654, + 458, + 666 + ], + "spans": [ + { + "bbox": [ + 151, + 654, + 458, + 666 + ], + "type": "text", + "content": "Figure D. Additional qualitative results of VDocRAG compared to Text-based RAG." + } + ] + } + ], + "index": 73, + "angle": 0, + "type": "image_caption" + } + ], + "index": 70 + }, + { + "type": "image", + "bbox": [ + 494, + 571, + 545, + 630 + ], + "blocks": [ + { + "bbox": [ + 490, + 553, + 503, + 562 + ], + "lines": [ + { + "bbox": [ + 490, + 553, + 503, + 562 + ], + "spans": [ + { + "bbox": [ + 490, + 553, + 503, + 562 + ], + "type": "text", + "content": "Top2" + } + ] + } + ], + "index": 69, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 494, + 571, + 545, + 630 + ], + "lines": [ + { + "bbox": [ + 494, + 571, + 545, + 630 + ], + "spans": [ + { + "bbox": [ + 494, + 571, + 545, + 630 + ], + "type": "image", + "image_path": "bc117a74a492d448f6907f4399296864b7122b25adc16f67e341ba2813f33b24.jpg" + } + ] + } + ], + "index": 72, + "angle": 0, + "type": "image_body" + } + ], + "index": 72 + } + ], + "discarded_blocks": [], + "page_size": [ + 612, + 792 + ], + "page_idx": 15 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file