diff --git "a/2025/2D-DPO_ Scaling Direct Preference Optimization with 2-Dimensional Supervision/layout.json" "b/2025/2D-DPO_ Scaling Direct Preference Optimization with 2-Dimensional Supervision/layout.json" new file mode 100644--- /dev/null +++ "b/2025/2D-DPO_ Scaling Direct Preference Optimization with 2-Dimensional Supervision/layout.json" @@ -0,0 +1,19606 @@ +{ + "pdf_info": [ + { + "para_blocks": [ + { + "bbox": [ + 145, + 75, + 449, + 110 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 145, + 75, + 449, + 110 + ], + "spans": [ + { + "bbox": [ + 145, + 75, + 449, + 110 + ], + "type": "text", + "content": "2D-DPO: Scaling Direct Preference Optimization with 2-Dimensional Supervision" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 117, + 127, + 479, + 157 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 117, + 127, + 479, + 157 + ], + "spans": [ + { + "bbox": [ + 117, + 127, + 479, + 157 + ], + "type": "text", + "content": "Shilong Li\\*, Yancheng He\\*, Hui Huang\\*, Xingyuan Bu\\*, Jiaheng Liu, Hangyu Guo, Weixun Wang, Jihao Gu, Wenbo Su, Bo Zheng" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 210, + 157, + 378, + 170 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 210, + 157, + 378, + 170 + ], + "spans": [ + { + "bbox": [ + 210, + 157, + 378, + 170 + ], + "type": "text", + "content": "Taobao & Tmall Group of Alibaba" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 155, + 171, + 439, + 185 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 171, + 439, + 185 + ], + "spans": [ + { + "bbox": [ + 155, + 171, + 439, + 185 + ], + "type": "text", + "content": "lishilong.aaron@gmail.com, xingyuanbu@gmail.com" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "spans": [ + { + "bbox": [ + 155, + 219, + 202, + 232 + ], + "type": "text", + 
"content": "Abstract" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 84, + 244, + 274, + 531 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 244, + 274, + 531 + ], + "spans": [ + { + "bbox": [ + 84, + 244, + 274, + 531 + ], + "type": "text", + "content": "Recent advancements in Direct Preference Optimization (DPO) have significantly enhanced the alignment of Large Language Models (LLMs) with human preferences, owing to its simplicity and effectiveness. However, existing methods typically optimize a scalar score or ranking reward, thereby overlooking the multidimensional nature of human preferences. In this work, we propose to extend the preference of DPO to two dimensions: segments and aspects. We first introduce a 2D supervision dataset called HelpSteer-2D. For the segment dimension, we divide the response into sentences and assign scores to each segment. For the aspect dimension, we meticulously design several criteria covering the response quality rubrics. With the 2-dimensional signals as feedback, we develop a 2D-DPO framework, decomposing the overall objective into multi-segment and multi-aspect objectives. Extensive experiments on popular benchmarks demonstrate that 2D-DPO performs better than methods that optimize for scalar or 1-dimensional preferences1." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 68, + 544, + 154, + 557 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 544, + 154, + 557 + ], + "spans": [ + { + "bbox": [ + 68, + 544, + 154, + 557 + ], + "type": "text", + "content": "1 Introduction" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 565, + 291, + 715 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 565, + 291, + 715 + ], + "spans": [ + { + "bbox": [ + 67, + 565, + 291, + 715 + ], + "type": "text", + "content": "Recent advancements in Large Language Models (LLMs) have shown impressive performance across a wide range of tasks (Zhao et al., 2023; Bai et al., 2024; Wu et al., 2024a; Li et al., 2024a). A pivotal component in LLM training is Reinforcement Learning from Human Feedback (RLHF) (Ouyang et al., 2022; Bai et al., 2022), which aligns LLMs with human preferences. However, due to its complexity, traditional RLHF often leads to challenges such as training instability and reward collapse (Wolf et al., 2023; Song et al., 2023)." + } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 310, + 221, + 522, + 350 + ], + "blocks": [ + { + "bbox": [ + 310, + 221, + 522, + 350 + ], + "lines": [ + { + "bbox": [ + 310, + 221, + 522, + 350 + ], + "spans": [ + { + "bbox": [ + 310, + 221, + 522, + 350 + ], + "type": "image", + "image_path": "c26ba8c0e5261d211d99d0dfada1895e2b1f81f968390b5437139bcff023d577.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 362, + 525, + 386 + ], + "lines": [ + { + "bbox": [ + 302, + 362, + 525, + 386 + ], + "spans": [ + { + "bbox": [ + 302, + 362, + 525, + 386 + ], + "type": "text", + "content": "Figure 1: An illustrative comparison between vanilla DPO and 2D-DPO." 
+ } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 407, + 526, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 407, + 526, + 651 + ], + "spans": [ + { + "bbox": [ + 302, + 407, + 526, + 651 + ], + "type": "text", + "content": "Direct Preference Optimization (DPO) (Rafailov et al., 2023), as a simpler and more effective alternative, has gained considerable attention due to its ability to bypass the need for explicitly fitting a reward model (Meng et al., 2024; Ethayarajh et al., 2024). However, most existing DPO-style approaches rely on scalar scores or rankings and ignore the multi-dimensional nature of human preferences, resulting in inefficient and imprecise optimization. For instance, a response may be deemed satisfactory under one aspect such as correctness, but falls short in another such as clarity. Moreover, not all segments of a response should be treated uniformly; even in a preferred response, there may be segments of inferior quality. This underscores the need for a more nuanced approach that recognizes the multi-dimensionality of feedback and its critical impact on model training." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 653, + 527, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 653, + 527, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 653, + 527, + 775 + ], + "type": "text", + "content": "In response, some recent works have attempted to leverage signals that are believed to reflect the importance of individual segments as reward scores (Zeng et al., 2024; Chan et al., 2024; Jiang et al., 2024; Chen et al., 2024). However, these signals are often derived from statistical features such as edit distance or confidence estimation, which can introduce noise and lack interpretability. 
Other approaches incorporate multi-objective optima" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 722, + 291, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 722, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 67, + 722, + 291, + 774 + ], + "type": "text", + "content": "* Equal contribution. ‡ Corresponding Author. \n† Work done during an internship at Taobao & Tmall Group of Alibaba. \n1Code and datasets are available at https://github.com/longls777/2D-DPO." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "text", + "content": "8164" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 189, + 795, + 404, + 806 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 189, + 795, + 404, + 806 + ], + "spans": [ + { + "bbox": [ + 189, + 795, + 404, + 806 + ], + "type": "text", + "content": "Findings of the Association for Computational Linguistics:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 238, + 807, + 355, + 817 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 238, + 807, + 355, + 817 + ], + "spans": [ + { + "bbox": [ + 238, + 807, + 355, + 817 + ], + "type": "text", + "content": "NAACL 2025, pages 8164-8188" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 163, + 818, + 430, + 829 + ], + "type": "footer", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 818, + 430, + 829 + ], + "spans": [ + { + "bbox": [ + 163, + 818, + 430, + 829 + ], + "type": "text", + "content": "April 29 - May 4, 2025 ©2025 Association for Computational Linguistics" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 0 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 205 + ], + "type": "text", + 
"angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 205 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 205 + ], + "type": "text", + "content": "tion to balance different aspects of human preferences (Wu et al., 2024b; Guo et al., 2023; Cao et al., 2024). However, these methods mostly rely on Proximal Policy Optimization (PPO) (Schulman et al., 2017), which is prone to instability during training. Furthermore, these efforts only extend preference optimization from 0-dimensional (scalar reward) to 1-dimensional (aspect/segment) supervision, which remains insufficient for capturing the complexity of real-world human preferences." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 206, + 291, + 449 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 206, + 291, + 449 + ], + "spans": [ + { + "bbox": [ + 69, + 206, + 291, + 449 + ], + "type": "text", + "content": "To better address the intricacy of human preferences, we propose 2D-DPO, a novel direct alignment strategy that enables 2-dimensional (2D) fine-grained optimization. Our core idea is to scale supervision signals across two dimensions: segments and aspects. To this end, we first construct a preference dataset called HelpSteer-2D, where each sample is annotated with a 2-dimensional score matrix evaluating each segment across multiple aspects. These signals are derived from a robust model guided by a set of stringent principles, ensuring the generation of highly accurate and interpretable supervision signals. Building on this, we propose a novel approach to achieve 2-dimensional direct preference alignment. Experimental results on three public benchmarks demonstrate that 2D-DPO significantly outperforms previous methods. 
In summary, our main contributions are threefold:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 70, + 460, + 291, + 616 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 70, + 460, + 290, + 514 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 460, + 290, + 514 + ], + "spans": [ + { + "bbox": [ + 70, + 460, + 290, + 514 + ], + "type": "text", + "content": "- We introduce a novel 2-dimensional preference alignment method, 2D-DPO, which scales supervision signals across both segments and aspects to better align with human preferences." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 70, + 525, + 291, + 565 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 525, + 291, + 565 + ], + "spans": [ + { + "bbox": [ + 70, + 525, + 291, + 565 + ], + "type": "text", + "content": "- We develop a high-quality, fine-grained preference dataset, HelpSteer-2D, which will be released to the community for future research." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 70, + 576, + 290, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 576, + 290, + 616 + ], + "spans": [ + { + "bbox": [ + 70, + 576, + 290, + 616 + ], + "type": "text", + "content": "- Extensive experiments show that 2D-DPO delivers superior performance in aligning with human preferences compared to prior approaches." 
+ } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 68, + 627, + 161, + 640 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 627, + 161, + 640 + ], + "spans": [ + { + "bbox": [ + 68, + 627, + 161, + 640 + ], + "type": "text", + "content": "2 Related Work" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 68, + 649, + 211, + 661 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 649, + 211, + 661 + ], + "spans": [ + { + "bbox": [ + 68, + 649, + 211, + 661 + ], + "type": "text", + "content": "2.1 Preference Optimization" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 291, + 775 + ], + "type": "text", + "content": "Large language models (LLMs) have advanced rapidly, with reinforcement learning from human feedback (RLHF) commonly used to align LLMs with human preferences (Ziegler et al., 2019; Stiessenon et al., 2020; Ouyang et al., 2022; Bai et al., 2022; Liu et al., 2024b,a; Peng et al., 2023; Feng et al., 2022). However, traditional RLHF methods face challenges like instability and high resource" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 71, + 526, + 274 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 274 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 274 + ], + "type": "text", + "content": "demands (Wolf et al., 2023; Song et al., 2023), prompting the search for simpler alternatives. One such representative approach is Direct Preference Optimization (DPO) (Rafailov et al., 2023), which optimizes alignment without explicit reward modeling, offering simplicity and stability. Building on DPO, IPO (Azar et al., 2024) adds a regularization term to alleviate overfitting. 
KTO (Ethayarajh et al., 2024) only requires a binary signal of whether an output is desirable or undesirable for an input to align LLMs, simplifying the data acquisition process. ORPO (Hong et al., 2024) simplifies training with odds ratio-based penalties, and SimPO (Meng et al., 2024) improves efficiency by using average log probability as an implicit reward." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 284, + 502, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 284, + 502, + 296 + ], + "spans": [ + { + "bbox": [ + 302, + 284, + 502, + 296 + ], + "type": "text", + "content": "2.2 Token-level Preference Optimization" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 301, + 526, + 586 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 301, + 526, + 586 + ], + "spans": [ + { + "bbox": [ + 302, + 301, + 526, + 586 + ], + "type": "text", + "content": "The response-level rewards in naive PPO and DPO often lack token-level details. To address this, researchers have explored fine-grained supervision signals in three ways: (1) Human annotation: Methods like PRM (Lightman et al., 2023) and FGRLHF (Wu et al., 2024b) involve human annotators labeling each segment of the response to generate fine-grained signals. (2) LLM annotation: To reduce the cost of human labeling, stronger LLMs are used to generate preference pairs with minimal edits (Guo et al., 2023; Chen et al., 2024; Yoon et al., 2024; Jiang et al., 2024) or to identify positive and negative response segments (Cao et al., 2024). (3) Internal signal: Some works use the model's internal information as reward signals, such as using attention scores for token rewards in ABC (Chan et al., 2024) or decomposing DPO's response-level rewards into token-level signals in SePO (Yang et al., 2024b; Rafailov et al., 2024). TDPO (Zeng et al., 2024) achieves token-level alignment by controlling the KL divergence for each token." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 595, + 520, + 608 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 595, + 520, + 608 + ], + "spans": [ + { + "bbox": [ + 302, + 595, + 520, + 608 + ], + "type": "text", + "content": "2.3 Multi-objective Preference Optimization" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 613, + 526, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 613, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 613, + 526, + 775 + ], + "type": "text", + "content": "Human preferences are often complex, diverse, and even contradictory, making single-dimensional training insufficient. To address this, some studies align LLMs with multiple objectives by either training separate reward models and averaging their outputs (Pan et al., 2023; Ji et al., 2024; Rame et al., 2024; de Langis et al., 2024; Wang et al., 2024a). However, this approach demands significant computational resources. In contrast, MODPO (Zhou et al., 2024) offers a simpler, reinforcement learning-free method for optimizing multiple objectives. RiC (Yang et al., 2024c) and" + } + ] + } + ], + "index": 13 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "8165" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 1 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 98 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 98 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 98 + ], + "type": "text", + "content": "CPO (Guo et al., 2024) focus on integrating multiple reward values for controllable generation." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 111, + 140, + 125 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 111, + 140, + 125 + ], + "spans": [ + { + "bbox": [ + 67, + 111, + 140, + 125 + ], + "type": "text", + "content": "3 Approach" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 134, + 290, + 243 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 134, + 290, + 243 + ], + "spans": [ + { + "bbox": [ + 67, + 134, + 290, + 243 + ], + "type": "text", + "content": "In this section, we provide a detailed introduction to our 2D-DPO approach for aligning LLMs. We first describe the construction of the 2D preference dataset in Section 3.1. Then, we present an enhanced 2D-DPO formulation that integrates the 2D supervision signals into the alignment process in Section 3.2. The complete pipeline of our method is illustrated in Figure 2." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 253, + 255, + 267 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 253, + 255, + 267 + ], + "spans": [ + { + "bbox": [ + 67, + 253, + 255, + 267 + ], + "type": "text", + "content": "3.1 Preference Dataset with 2D Signal" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "spans": [ + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "text", + "content": "In general, a preference optimization dataset, denoted as " + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "inline_equation", + "content": "\\mathcal{D} = \\{x^i,y_w^i,y_l^i\\}_{i = 1}^N" + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "text", + "content": ", comprises prompts " + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "inline_equation", + "content": "x" + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "text", + "content": " along with a 
chosen response " + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "inline_equation", + "content": "y_{w}" + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "text", + "content": " and a rejected response " + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "inline_equation", + "content": "y_{l}" + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "inline_equation", + "content": "y_{w}" + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "text", + "content": " is of higher quality compared to " + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "inline_equation", + "content": "y_{l}" + }, + { + "bbox": [ + 67, + 273, + 291, + 488 + ], + "type": "text", + "content": ". Such datasets are commonly used to train reward models (e.g., PPO) or directly for model alignment (e.g., DPO). However, differentiating between the chosen and rejected responses based on a scalar score is often coarse and imprecise. The quality of responses can vary significantly across different evaluation aspects, and even a chosen response might contain segments of low quality, while a rejected response could include segments of high quality. Therefore, relying solely on a scalar score for optimization may restrict the model's ability to effectively align with human preferences." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 490, + 291, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 490, + 291, + 666 + ], + "spans": [ + { + "bbox": [ + 67, + 490, + 291, + 666 + ], + "type": "text", + "content": "To address this issue, we propose a fine-grained scoring approach that decomposes the scalar scores of model responses to segment-level and aspect-level. 
The first step is to divide the response into segments, and the choice of segment length is crucial for ensuring the effectiveness of the fine-grained optimization. Segments that are too long cannot resolve the aforementioned coarse scoring issues, while segments that are too short pose difficulties for accurate assessment. Therefore, we choose the sentence as the scoring unit, which can strike a balance between scoring accuracy and the clarity of segment preferences." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 667, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 667, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 667, + 291, + 775 + ], + "type": "text", + "content": "After segmenting responses based on typical sentence-ending punctuations, we employ GPT-4 to perform aspect-level scoring. Following HelpSteer2 (Wang et al., 2024b), we annotate the preference data across five key aspects: Helpfulness, Correctness, Safety, Completeness, Clarity. The first three aspects are independent of different sentences. The aspect of Completeness gen" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 166 + ], + "type": "text", + "content": "erally increases as responses become more comprehensive, while Clarity tends to decrease as responses grow longer and more redundant. To ensure the integrity of our annotations, we use separate prompts for each aspect to prevent any cross-influence among them. For the details of the annotation process, please refer to Appendix F." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 313, + 167, + 512, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 313, + 167, + 512, + 179 + ], + "spans": [ + { + "bbox": [ + 313, + 167, + 512, + 179 + ], + "type": "text", + "content": "Finally, the constructed dataset is as follows:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 345, + 190, + 440, + 206 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 345, + 190, + 440, + 206 + ], + "spans": [ + { + "bbox": [ + 345, + 190, + 440, + 206 + ], + "type": "interline_equation", + "content": "\\mathcal {D} = \\left\\{x ^ {i}, \\boldsymbol {y} _ {w} ^ {i}, \\boldsymbol {y} _ {l} ^ {i} \\right\\} _ {i = 1} ^ {N}", + "image_path": "eeda2d7f888ab53982a2a9211cfc52dccd9ffdfa11f7c84a0530d46d32fdd477.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 346, + 208, + 481, + 228 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 208, + 481, + 228 + ], + "spans": [ + { + "bbox": [ + 346, + 208, + 481, + 228 + ], + "type": "interline_equation", + "content": "\\boldsymbol {y} _ {w} ^ {i} = \\left\\{y _ {w, k} ^ {i}, \\left\\{r _ {w, k, j} ^ {i} \\right\\} _ {j = 1} ^ {A} \\right\\} _ {k = 1} ^ {S _ {w} ^ {i}},", + "image_path": "2df680b09e95205e40d465ca9f16cb966fabb6ac62a3bea5a3fb026f8395e5de.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 346, + 230, + 525, + 249 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 346, + 230, + 525, + 249 + ], + "spans": [ + { + "bbox": [ + 346, + 230, + 525, + 249 + ], + "type": "interline_equation", + "content": "\\boldsymbol {y} _ {l} ^ {i} = \\left\\{y _ {l, k} ^ {i}, \\left\\{r _ {l, k, j} ^ {i} \\right\\} _ {j = 1} ^ {A} \\right\\} _ {k = 1} ^ {S _ {l} ^ {i}}. 
\\tag {1}", + "image_path": "bae9cb91ccf14b9b384c16ced3c1859cf57f1108929c42c03638b39c71c4bd5f.jpg" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "spans": [ + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": "In the dataset " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "\\mathcal{D}" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": ", " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "x^i" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": " represents the " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "i" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": "-th prompt, while " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "\\pmb{y}_w^i" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "\\pmb{y}_l^i" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": " represent the 2D annotations for the chosen and rejected response, respectively. 
Each 2D annotation includes " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "S^i" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": " text segments, indexed by " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": ", denoted as " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "y_k^i" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": ". The scores for these segments are denoted by " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "r_{k,j}^i" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "inline_equation", + "content": "j" + }, + { + "bbox": [ + 302, + 261, + 526, + 356 + ], + "type": "text", + "content": " indicates the index across the aforementioned five aspects." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 303, + 366, + 371, + 378 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 366, + 371, + 378 + ], + "spans": [ + { + "bbox": [ + 303, + 366, + 371, + 378 + ], + "type": "text", + "content": "3.2 2D-DPO" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 302, + 384, + 525, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 384, + 525, + 506 + ], + "spans": [ + { + "bbox": [ + 302, + 384, + 525, + 506 + ], + "type": "text", + "content": "While the construction process of 2D signals is straightforward, integrating them effectively into the alignment process presents significant challenges. 
Previous approaches mostly utilize these signals as a scalar reward by weighted summation, which is insufficient for enabling the model to distinguish between varying quality across different dimensions. To address this issue, we propose a novel alignment method called 2D-DPO." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 302, + 514, + 525, + 582 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 514, + 525, + 582 + ], + "spans": [ + { + "bbox": [ + 302, + 514, + 525, + 582 + ], + "type": "text", + "content": "Vanilla DPO. Direct Preference Optimization (DPO) (Rafailov et al., 2023), as one of the most popular alignment methods, proposes a direct optimization objective that satisfies the optimal preference policy without using a reward model:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 315, + 594, + 525, + 671 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 315, + 594, + 525, + 671 + ], + "spans": [ + { + "bbox": [ + 315, + 594, + 525, + 671 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {D P O} \\left(\\pi_ {\\theta}; \\pi_ {r e f}\\right) = \\\\ - \\mathbb {E} _ {\\left(x, y ^ {w}, y ^ {l}\\right) \\sim \\mathcal {D}} \\left[ \\log \\sigma \\left(\\beta \\log \\frac {\\pi_ {\\theta} \\left(y _ {w} \\mid x\\right)}{\\pi_ {r e f} \\left(y _ {w} \\mid x\\right)} \\right. \\right. \\\\ \\left. \\left. 
- \\beta \\log \\frac {\\pi_ {\\theta} (y _ {l} \\mid x)}{\\pi_ {r e f} (y _ {l} \\mid x)}\\right) \\right], \\tag {2} \\\\ \\end{array}", + "image_path": "991f5a8822f8470d16176869e5c24c093db37d3e0cb9269ea382d8a4e223c344.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 302, + 680, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 680, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 680, + 525, + 775 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 302, + 680, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\pi_{\\theta}" + }, + { + "bbox": [ + 302, + 680, + 525, + 775 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 680, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\pi_{ref}" + }, + { + "bbox": [ + 302, + 680, + 525, + 775 + ], + "type": "text", + "content": " represent the policy model and the reference model, respectively. DPO can fundamentally be viewed as a multi-armed bandit problem, where the model's entire response is treated as a single arm. According to Rafailov et al. 
2023, in the token-level Markov Decision Process (MDP), the language model's Bradley-Terry preference" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "text", + "content": "8166" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 2 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 76, + 103, + 278, + 246 + ], + "blocks": [ + { + "bbox": [ + 75, + 92, + 173, + 102 + ], + "lines": [ + { + "bbox": [ + 75, + 92, + 173, + 102 + ], + "spans": [ + { + "bbox": [ + 75, + 92, + 173, + 102 + ], + "type": "text", + "content": "(a) Develop a set of principles" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 76, + 103, + 278, + 246 + ], + "lines": [ + { + "bbox": [ + 76, + 103, + 278, + 246 + ], + "spans": [ + { + "bbox": [ + 76, + 103, + 278, + 246 + ], + "type": "image", + "image_path": "991f8b990d829f5d53267a9a3d5ba3455b2f1bb1f0c4b94c8bbcfcf10d48c2e9.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 301, + 90, + 506, + 100 + ], + "lines": [ + { + "bbox": [ + 301, + 90, + 506, + 100 + ], + "spans": [ + { + "bbox": [ + 301, + 90, + 506, + 100 + ], + "type": "text", + "content": "(b) Construct a preference dataset with 2D fine-grained signals" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 304, + 111, + 440, + 120 + ], + "lines": [ + { + "bbox": [ + 304, + 111, + 440, + 120 + ], + "spans": [ + { + "bbox": [ + 304, + 111, + 440, + 120 + ], + "type": "text", + "content": "Query: Compare K-Means and Hierarchical Clustering." 
+ } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 307, + 137, + 473, + 144 + ], + "lines": [ + { + "bbox": [ + 307, + 137, + 473, + 144 + ], + "spans": [ + { + "bbox": [ + 307, + 137, + 473, + 144 + ], + "type": "text", + "content": "Both K-Means and Hierarchical Clustering are unsupervised learning" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 307, + 144, + 496, + 158 + ], + "lines": [ + { + "bbox": [ + 307, + 144, + 496, + 158 + ], + "spans": [ + { + "bbox": [ + 307, + 144, + 496, + 158 + ], + "type": "text", + "content": "techniques used for cluster analysis: K-Means Clustering: K-Means is a widely used algorithm for clustering data. And, ..." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 307, + 159, + 499, + 168 + ], + "lines": [ + { + "bbox": [ + 307, + 159, + 499, + 168 + ], + "spans": [ + { + "bbox": [ + 307, + 159, + 499, + 168 + ], + "type": "text", + "content": "Helpfulness: 4, Correctness: 4, Safety: 4, Completeness: 2, Clarity: 4" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 306, + 171, + 500, + 179 + ], + "lines": [ + { + "bbox": [ + 306, + 171, + 500, + 179 + ], + "spans": [ + { + "bbox": [ + 306, + 171, + 500, + 179 + ], + "type": "text", + "content": "Rejected: " + }, + { + "bbox": [ + 306, + 171, + 500, + 179 + ], + "type": "inline_equation", + "content": "{}_{1}^{1}" + }, + { + "bbox": [ + 306, + 171, + 500, + 179 + ], + "type": "text", + "content": " Helpfulness: 2,Correctness: 4,Safety: 4,Completeness: 1,Clarity: 3" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_footnote" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 307, + 207, + 508, + 259 + ], + "blocks": [ + { + "bbox": [ + 304, + 126, + 504, + 135 + ], + "lines": [ + { + "bbox": [ + 304, + 126, + 504, + 135 + ], + "spans": [ + { + "bbox": [ + 304, + 126, + 504, + 135 
+ ], + "type": "text", + "content": "Chosen: Helpfulness: 4, Correctness: 4, Safety: 4, Completeness: 1, Clarity: 4" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 308, + 185, + 505, + 206 + ], + "lines": [ + { + "bbox": [ + 308, + 185, + 505, + 206 + ], + "spans": [ + { + "bbox": [ + 308, + 185, + 505, + 206 + ], + "type": "text", + "content": "I will introduce the comparison of these two algorithms in detail! They are two of the most popular clustering algorithms used in unsupervised learning. Both have their own advantages and ..." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_footnote" + }, + { + "bbox": [ + 307, + 207, + 508, + 259 + ], + "lines": [ + { + "bbox": [ + 307, + 207, + 508, + 259 + ], + "spans": [ + { + "bbox": [ + 307, + 207, + 508, + 259 + ], + "type": "image", + "image_path": "d61f8b3facd25fb24fa84828860648a6afb1706b70d6fade51381b615e99ce53.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 75, + 270, + 521, + 396 + ], + "blocks": [ + { + "bbox": [ + 84, + 259, + 218, + 269 + ], + "lines": [ + { + "bbox": [ + 84, + 259, + 218, + 269 + ], + "spans": [ + { + "bbox": [ + 84, + 259, + 218, + 269 + ], + "type": "text", + "content": "Step2: Align the LLM with 2D-DPO" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + }, + { + "bbox": [ + 75, + 270, + 521, + 396 + ], + "lines": [ + { + "bbox": [ + 75, + 270, + 521, + 396 + ], + "spans": [ + { + "bbox": [ + 75, + 270, + 521, + 396 + ], + "type": "image", + "image_path": "16ff536515244ed5179d47f447f0c35f97c9d249ad5809bcfb1a3d7ee9b67a32.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 405, + 524, + 442 + ], + "lines": [ + { + "bbox": [ + 67, + 405, + 524, + 442 + ], + "spans": [ + { + "bbox": [ + 67, + 405, + 524, + 442 + ], + "type": "text", + "content": "Figure 2: Illustration 
of our proposed 2D-DPO. Firstly, we develop principles for preference annotation on different aspects, and collect scores across different segments and aspects for pairwise responses, leading to 2-dimensional signals. Secondly, we apply 2D-DPO on the constructed signals with decomposed training objective." + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_caption" + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 454, + 274, + 468 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 454, + 274, + 468 + ], + "spans": [ + { + "bbox": [ + 67, + 454, + 274, + 468 + ], + "type": "text", + "content": "model can be expressed in the following form:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 68, + 476, + 289, + 491 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 476, + 289, + 491 + ], + "spans": [ + { + "bbox": [ + 68, + 476, + 289, + 491 + ], + "type": "interline_equation", + "content": "p ^ {*} \\left(\\tau^ {w} \\succeq \\tau^ {l}\\right) = \\tag {3}", + "image_path": "5e962511a4ada1397e834af162dd2ee9911b043eedff28201d5a3e4b430044ba.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 69, + 495, + 288, + 540 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 495, + 288, + 540 + ], + "spans": [ + { + "bbox": [ + 69, + 495, + 288, + 540 + ], + "type": "interline_equation", + "content": "\\frac {\\exp \\left(\\sum_ {i = 1} ^ {N} r (\\mathbf {s} _ {i} ^ {w} , \\mathbf {a} _ {i} ^ {w})\\right)}{\\exp \\left(\\sum_ {i = 1} ^ {N} r (\\mathbf {s} _ {i} ^ {w} , \\mathbf {a} _ {i} ^ {w})\\right) + \\exp \\left(\\sum_ {i = 1} ^ {M} r (\\mathbf {s} _ {i} ^ {l} , \\mathbf {a} _ {i} ^ {l})\\right)},", + "image_path": "408dded3cccecb6c9fcd54b6785e276c9144c00fb38a3b6ab4d05dc3f7b1294c.jpg" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 67, + 548, + 290, + 616 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 548, + 290, + 616 + 
], + "spans": [ + { + "bbox": [ + 67, + 548, + 290, + 616 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 548, + 290, + 616 + ], + "type": "inline_equation", + "content": "\\tau^w" + }, + { + "bbox": [ + 67, + 548, + 290, + 616 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 548, + 290, + 616 + ], + "type": "inline_equation", + "content": "\\tau^l" + }, + { + "bbox": [ + 67, + 548, + 290, + 616 + ], + "type": "text", + "content": " represent the winning and losing trajectories, respectively. In this context, a represents the next generated token, and s denotes the current state, consisting of the prompt along with all previously generated tokens." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 617, + 290, + 670 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 617, + 290, + 670 + ], + "spans": [ + { + "bbox": [ + 67, + 617, + 290, + 670 + ], + "type": "text", + "content": "Subsequently, based on the fixed point solution under the general maximum entropy RL setting (Ziebart, 2010; Levine, 2018), Rafailov et al. derived the form of DPO in token-level MDP:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 88, + 681, + 290, + 772 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 88, + 681, + 290, + 772 + ], + "spans": [ + { + "bbox": [ + 88, + 681, + 290, + 772 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {D P O} \\left(\\pi_ {\\theta}; \\pi_ {r e f}\\right) = \\tag {4} \\\\ - \\mathbb {E} \\left[ \\log \\sigma \\left(\\beta \\sum_ {t = 0} ^ {N - 1} \\log \\frac {\\pi_ {\\theta} \\left(a _ {w} ^ {t} \\mid s _ {w} ^ {t}\\right)}{\\pi_ {r e f} \\left(a _ {w} ^ {t} \\mid s _ {w} ^ {t}\\right)} \\right. \\right. \\\\ \\left. - \\beta \\sum_ {t = 0} ^ {M - 1} \\log \\left. \\frac {\\pi_ {\\theta} (a _ {l} ^ {t} \\mid s _ {l} ^ {t})}{\\pi_ {r e f} (a _ {l} ^ {t} \\mid s _ {l} ^ {t})}\\right) \\right]. 
\\\\ \\end{array}", + "image_path": "c5d822c42103d5962b9acb11c0098d6e68625e8bcffc628bd5af3db541432e4b.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "spans": [ + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "text", + "content": "In the above process, Rafailov et al. combined Ng et al.'s definition of equivalence between two reward functions through a potential function and concluded that " + }, + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "inline_equation", + "content": "\\beta \\log \\frac{\\pi_{\\theta}^{*}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}{\\pi_{ref}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}" + }, + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "inline_equation", + "content": "r(\\mathbf{s}_t,\\mathbf{a}_t)" + }, + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "text", + "content": " can equivalently yield the same optimal policy. Furthermore, " + }, + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "inline_equation", + "content": "\\beta \\log \\frac{\\pi_{\\theta}^{*}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}{\\pi_{ref}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}" + }, + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "text", + "content": " is precisely the optimal advantage function " + }, + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "inline_equation", + "content": "A^{*}(\\mathbf{s}_{t},\\mathbf{a}_{t})" + }, + { + "bbox": [ + 302, + 454, + 526, + 570 + ], + "type": "text", + "content": " . For a detailed derivation, please refer to Appendix A.2 and A.3." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 302, + 581, + 526, + 689 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 581, + 526, + 689 + ], + "spans": [ + { + "bbox": [ + 302, + 581, + 526, + 689 + ], + "type": "text", + "content": "2D-DPO. With the above conclusions, 2D supervision signals can be conveniently integrated into the alignment process. We achieve the integration by using the signals to calibrate the token-level advantage function " + }, + { + "bbox": [ + 302, + 581, + 526, + 689 + ], + "type": "inline_equation", + "content": "A(\\mathbf{s}_t, \\mathbf{a}_t)" + }, + { + "bbox": [ + 302, + 581, + 526, + 689 + ], + "type": "text", + "content": " for different segments. This approach equips the model with a direct perception of fine-grained preferences, avoiding the ambiguity of holistic rewards." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "text", + "content": "Specifically, we use the regularized fine-grained reward " + }, + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "text", + "content": " as a coefficient, incorporating it into the token-level advantage function to obtain " + }, + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\beta r\\log \\frac{\\pi_{\\theta}(\\mathbf{a}_t|\\mathbf{s}_t)}{\\pi_{ref}(\\mathbf{a}_t|\\mathbf{s}_t)}" + }, + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "text", + "content": ", which integrates the fine-grained signals. 
In practice, this is equivalent to adjusting " + }, + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "text", + "content": " within the original " + }, + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "inline_equation", + "content": "A(\\mathbf{s}_t,\\mathbf{a}_t)" + }, + { + "bbox": [ + 302, + 691, + 525, + 775 + ], + "type": "text", + "content": ". We provide complete" + } + ] + } + ], + "index": 23 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 77, + 75, + 218, + 86 + ], + "type": "header", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 75, + 218, + 86 + ], + "spans": [ + { + "bbox": [ + 77, + 75, + 218, + 86 + ], + "type": "text", + "content": "Step1: Collect 2D fine-grain signals" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "8167" + } + ] + } + ], + "index": 24 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 3 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 244, + 83 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 244, + 83 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 244, + 83 + ], + "type": "text", + "content": "proof of its feasibility in Appendix A.4." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 83, + 289, + 105 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 83, + 289, + 105 + ], + "spans": [ + { + "bbox": [ + 67, + 83, + 289, + 105 + ], + "type": "text", + "content": "The token-level DPO incorporating fine-grained signals is formalized as follows:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 74, + 116, + 290, + 195 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 74, + 116, + 290, + 195 + ], + "spans": [ + { + "bbox": [ + 74, + 116, + 290, + 195 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} (\\pi_ {\\theta}, D) = \\\\ - \\mathbb {E} _ {(\\tau_ {w}, \\tau_ {l}) \\sim D} \\log \\sigma (\\beta \\sum_ {k = 0} ^ {S _ {w} - 1} \\sum_ {t = n _ {k}} ^ {n _ {k} + l _ {k}} r _ {w, k} \\log \\frac {\\pi_ {\\theta} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}{\\pi_ {r e f} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)} \\\\ - \\beta \\sum_ {k = 0} ^ {S _ {l} - 1} \\sum_ {t = n _ {k}} ^ {n _ {k} + l _ {k}} r _ {l, k} \\log \\frac {\\pi_ {\\theta} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}{\\pi_ {r e f} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}, \\tag {5} \\\\ \\end{array}", + "image_path": "9c9e4dc1fe20ed81e472863ff0ab13dd5a7cc9fabb610f804ed5fe4be6444e77.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "spans": [ + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "type": "inline_equation", + "content": "n_k" + }, + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "type": "text", + "content": " represents the first token of the " + }, + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + 
"type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "type": "text", + "content": "-th segment and " + }, + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "type": "inline_equation", + "content": "l_k" + }, + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "type": "text", + "content": " denotes the length of the " + }, + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "type": "inline_equation", + "content": "k" + }, + { + "bbox": [ + 68, + 207, + 291, + 233 + ], + "type": "text", + "content": "-th segment." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 235, + 290, + 318 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 235, + 290, + 318 + ], + "spans": [ + { + "bbox": [ + 67, + 235, + 290, + 318 + ], + "type": "text", + "content": "For handling multiple aspects, we use a classic weighted approach for integration, that is, " + }, + { + "bbox": [ + 67, + 235, + 290, + 318 + ], + "type": "inline_equation", + "content": "r_{w,k} = \\mathbf{W}\\tilde{\\mathbf{r}}_{w,k}" + }, + { + "bbox": [ + 67, + 235, + 290, + 318 + ], + "type": "text", + "content": ", where " + }, + { + "bbox": [ + 67, + 235, + 290, + 318 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 67, + 235, + 290, + 318 + ], + "type": "text", + "content": " represents the weights that sum up to 1, which reflect the importance of each aspect during the alignment process, and " + }, + { + "bbox": [ + 67, + 235, + 290, + 318 + ], + "type": "inline_equation", + "content": "\\tilde{\\mathbf{r}}_{w,k} = \\{r_{w,k,j}\\}_{j=1}^{A}" + }, + { + "bbox": [ + 67, + 235, + 290, + 318 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "spans": [ + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "text", + "content": "Segment Selection. 
The number of segments in the chosen and rejected responses may differ significantly, and typically only the segments with an impact on response preference need attention. Therefore, we select the top- " + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "text", + "content": " highest-scoring segments from the chosen response and the top- " + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "text", + "content": " lowest-scoring segments from the rejected response, where " + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "inline_equation", + "content": "N = \\min(S_w, S_l)" + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "text", + "content": ", further enhances the efficiency of model alignment training. Additionally, we group segments in pairs to provide clearer contrast during alignment, making it easier for the model to learn fine-grained differences between the chosen and rejected responses. These segments are paired to form " + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "inline_equation", + "content": "N" + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "text", + "content": " BT models. The feasibility of this rearrangement is based on the fact that the loss for a single-segment BT model can be treated as setting the " + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "inline_equation", + "content": "\\beta_t" + }, + { + "bbox": [ + 67, + 326, + 291, + 596 + ], + "type": "text", + "content": " of other parts to 0, as demonstrated in Appendix A.4. 
Thus, we obtain the token-level DPO formula incorporating fine-grained signals:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 70, + 618, + 290, + 704 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 70, + 618, + 290, + 704 + ], + "spans": [ + { + "bbox": [ + 70, + 618, + 290, + 704 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\mathcal {L} _ {\\text {g r o u p}} \\left(\\pi_ {\\theta}, D\\right) = \\\\ - \\mathbb {E} _ {(\\tau_ {w}, \\tau_ {l}) \\sim D} \\left[ \\sum_ {k = 0} ^ {N - 1} \\log \\sigma \\left(\\beta \\sum_ {t = n _ {k}} ^ {n _ {k} + l _ {k}} r _ {w, k} \\log \\frac {\\pi_ {\\theta} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}{\\pi_ {r e f} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}\\right) \\right. \\\\ \\left. \\left. - \\beta \\sum_ {t = n _ {k}} ^ {n _ {k} + l _ {k}} r _ {l, k} \\log \\frac {\\pi_ {\\theta} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}{\\pi_ {r e f} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}\\right) \\right]. \\tag {6} \\\\ \\end{array}", + "image_path": "990cb077a51ca58789e98509e8ad1bdb3d4166a170208fa23c7bb6b51f1c60e6.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 721, + 291, + 775 + ], + "type": "text", + "content": "As a result, we've formulated the definitive objective of 2D-DPO. 
This training objective allows for the direct integration of 2-D supervision signals into the alignment process, enabling LLMs to" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 71, + 526, + 111 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 111 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 111 + ], + "type": "text", + "content": "discern the different aspects lying in different segments in the responses, thereby promoting better alignment with human preferences." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 121, + 390, + 134 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 121, + 390, + 134 + ], + "spans": [ + { + "bbox": [ + 302, + 121, + 390, + 134 + ], + "type": "text", + "content": "4 Experiments" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 302, + 142, + 362, + 155 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 142, + 362, + 155 + ], + "spans": [ + { + "bbox": [ + 302, + 142, + 362, + 155 + ], + "type": "text", + "content": "4.1 Set-up" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 158, + 526, + 295 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 158, + 526, + 295 + ], + "spans": [ + { + "bbox": [ + 302, + 158, + 526, + 295 + ], + "type": "text", + "content": "Benchmark. Our method has been tested on three widely recognized instruction-following benchmarks: Arena-Hard (Li et al., 2024b), AlpacaEval 2.0 (Dubois et al., 2024), and MT-Bench (Zheng et al., 2023). Each benchmark comprises a diverse set of queries, and the answers are evaluated under the framework of LLM-as-a-Judge (Zheng et al., 2023). 
We use gpt-4-turbo-2024-04-09" + }, + { + "bbox": [ + 302, + 158, + 526, + 295 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 302, + 158, + 526, + 295 + ], + "type": "text", + "content": " as the judge model and the scores are reported following each benchmark's protocol." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 302, + 301, + 525, + 383 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 301, + 525, + 383 + ], + "spans": [ + { + "bbox": [ + 302, + 301, + 525, + 383 + ], + "type": "text", + "content": "Model. Our method is validated on two models, Qwen2-7B-Instruct (Yang et al., 2024a) and Llama3-8B-Instruct (AI@Meta, 2024). It deserves to be noticed that both models have undergone extensive instruction-tuning processes, therefore we directly perform preference optimization." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 390, + 525, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 390, + 525, + 417 + ], + "spans": [ + { + "bbox": [ + 302, + 390, + 525, + 417 + ], + "type": "text", + "content": "Baseline. We mainly compare our method with 0-dimensional preference optimization methods:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 316, + 423, + 526, + 711 + ], + "type": "list", + "angle": 0, + "index": 19, + "blocks": [ + { + "bbox": [ + 316, + 423, + 525, + 478 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 423, + 525, + 478 + ], + "spans": [ + { + "bbox": [ + 316, + 423, + 525, + 478 + ], + "type": "text", + "content": "DPO (Rafailov et al., 2023). This method leverages a mapping between reward functions and optimal policies to optimize the preference with a single stage of policy training." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 316, + 486, + 524, + 539 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 486, + 524, + 539 + ], + "spans": [ + { + "bbox": [ + 316, + 486, + 524, + 539 + ], + "type": "text", + "content": "- IPO (Azar et al., 2024). This method proposes a theoretically grounded approach method to replace pairwise preferences in DPO with pointwise rewards." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 316, + 547, + 525, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 547, + 525, + 588 + ], + "spans": [ + { + "bbox": [ + 316, + 547, + 525, + 588 + ], + "type": "text", + "content": "- KTO (Ethayarajh et al., 2024). This method proposes to maximize the utility of generations from non-paired data directly." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 316, + 597, + 524, + 650 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 597, + 524, + 650 + ], + "spans": [ + { + "bbox": [ + 316, + 597, + 524, + 650 + ], + "type": "text", + "content": "- ORPO (Hong et al., 2024). This method leverages a reference model-free monolithic odds ratio for contrasting favored and disfavored styles during the SFT stage." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 316, + 658, + 526, + 711 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 316, + 658, + 526, + 711 + ], + "spans": [ + { + "bbox": [ + 316, + 658, + 526, + 711 + ], + "type": "text", + "content": "- SimPO (Meng et al., 2024). This method proposes to use the average log probability of a sequence as the implicit reward, which eliminates the need for a reference model." 
+ } + ] + } + ], + "index": 18 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 302, + 719, + 524, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 719, + 524, + 746 + ], + "spans": [ + { + "bbox": [ + 302, + 719, + 524, + 746 + ], + "type": "text", + "content": "We also compare our method with the following 1-dimensional preference optimization method:" + } + ] + } + ], + "index": 20 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 752, + 501, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 752, + 501, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 752, + 501, + 774 + ], + "type": "inline_equation", + "content": "^{2}" + }, + { + "bbox": [ + 302, + 752, + 501, + 774 + ], + "type": "text", + "content": "https://platform.openai.com/docs/models/gpt-4-turbo-and-gpt-4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "8168" + } + ] + } + ], + "index": 22 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 4 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 69, + 526, + 380 + ], + "blocks": [ + { + "bbox": [ + 70, + 69, + 526, + 380 + ], + "lines": [ + { + "bbox": [ + 70, + 69, + 526, + 380 + ], + "spans": [ + { + "bbox": [ + 70, + 69, + 526, + 380 + ], + "type": "table", + "html": "
Policy ModelMethodsArena-HardAlpacaEval 2.0MT-Bench
WR (%)Avg. Len95% CILC (%)WR (%)Avg. LenTurn 1Turn 2Avg. Score
Qwen2-7B-InstructBase25.10583(-2.1, 2.0)30.6828.3218628.016.617.31
+ DPO29.40578(-1.8, 1.9)29.0726.8319968.116.457.28
+ IPO26.50556(-2.2, 2.2)28.7026.5819407.906.537.21
+ KTO26.10518(-2.1, 2.5)26.4623.0017308.116.407.26
+ ORPO25.40573(-2.2, 1.8)28.5827.7019368.096.527.31
+ SimPO29.00539(-1.9, 2.4)29.9427.7019048.066.507.28
+ TDPO25.90564(-2.0, 2.4)29.8127.3318968.056.467.26
+ 1D-DPO29.80574(-2.4, 2.2)31.0728.7019518.136.487.31
+ 2D-DPO30.30586(-1.9, 2.4)31.5128.9419948.186.687.43
Llama3-8B-InstructBase25.40599(-2.2, 2.4)27.0826.9619597.666.847.25
+ DPO25.90567(-1.9, 2.2)31.6830.3118837.646.607.20
+ IPO24.80548(-2.2, 1.8)29.6928.5718917.736.757.24
+ KTO25.20507(-1.9, 2.4)27.9527.0818357.656.657.15
+ ORPO25.60537(-1.7, 1.9)29.1928.5718927.756.687.22
+ SimPO26.30552(-1.8, 2.2)31.5530.1918797.966.707.33
+ TDPO23.40566(-1.7, 1.8)28.5726.9618817.956.807.38
+ 1D-DPO26.70563(-2.3, 1.8)31.7830.1918937.986.747.34
+ 2D-DPO27.00554(-1.9, 2.0)32.0630.5618848.046.847.44
Llama3-8B-SFTBase3.00478(-0.6, 0.7)5.844.7810865.714.515.11
+ DPO3.70447(-0.7, 0.7)8.105.6510356.004.415.21
+ SimPO3.40479(-0.7, 0.7)6.306.7116756.115.035.57
+ TDPO4.40438(-0.7, 0.8)9.216.029816.114.805.46
+ 2D-DPO6.90423(-0.9, 1.2)10.937.0211226.185.145.66
", + "image_path": "aa28aa3000e761ea6a81c2f93d108ed3c86d16afb48494f713277cefc1027640.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 386, + 525, + 412 + ], + "lines": [ + { + "bbox": [ + 67, + 386, + 525, + 412 + ], + "spans": [ + { + "bbox": [ + 67, + 386, + 525, + 412 + ], + "type": "text", + "content": "Table 1: Experiment results of different preference optimization methods on instruction-following benchmarks. We report the results on each benchmark based on their recommended metrics." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 81, + 433, + 291, + 488 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 81, + 433, + 291, + 488 + ], + "spans": [ + { + "bbox": [ + 81, + 433, + 291, + 488 + ], + "type": "text", + "content": "- TDPO (Zeng et al., 2024). This method proposes to control the KL divergence constraints for each token, aiming to strike a better balance between alignment and diversity." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 511, + 291, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 511, + 291, + 567 + ], + "spans": [ + { + "bbox": [ + 67, + 511, + 291, + 567 + ], + "type": "text", + "content": "Our method is evaluated under two configurations — 1D-DPO and 2D-DPO. 1D-DPO only incorporates a single aspect (helpfulness) as signals while 2D-DPO uses all five aspects." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 584, + 290, + 651 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 584, + 290, + 651 + ], + "spans": [ + { + "bbox": [ + 67, + 584, + 290, + 651 + ], + "type": "text", + "content": "Training. We perform preference optimization based on HelpSteer-2D, which is constructed based on HelpSteer2 (Wang et al., 2024b). We leverage gpt-4o-2024-05-133 to generate 2-dimensional scores which align with our requirements." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 655, + 291, + 723 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 655, + 291, + 723 + ], + "spans": [ + { + "bbox": [ + 67, + 655, + 291, + 723 + ], + "type": "text", + "content": "To make a fair comparison, all methods are combined with SFT loss with a coefficient of 0.1 except for " + }, + { + "bbox": [ + 67, + 655, + 291, + 723 + ], + "type": "inline_equation", + "content": "\\mathrm{ORPO^4}" + }, + { + "bbox": [ + 67, + 655, + 291, + 723 + ], + "type": "text", + "content": ". The other hyper-parameters are tuned to achieve an optimal performance for each method. Please refer to Appendix B.1 for more details." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 303, + 433, + 393, + 444 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 303, + 433, + 393, + 444 + ], + "spans": [ + { + "bbox": [ + 303, + 433, + 393, + 444 + ], + "type": "text", + "content": "4.2 Main Results" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 449, + 526, + 652 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 449, + 526, + 652 + ], + "spans": [ + { + "bbox": [ + 302, + 449, + 526, + 652 + ], + "type": "text", + "content": "The primary results are shown in Table 1. As can be seen, our proposed 2D-DPO outperforms existing methods across all three benchmarks, verifying the significance of 2-dimensional supervision in preference optimization. While previous methods mostly treat different segments uniformly with a singular scoring criterion, leading to insufficient supervision, this work scales the feedback to both aspect-level and segment-level, thereby improving the performance. Notice that all methods are conducted on the same group of queries and responses, and our method does not require an additional training stage or extra computation overhead; therefore, our method exploits the utility of direct preference optimization with minimal expense." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 302, + 653, + 527, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 653, + 527, + 775 + ], + "spans": [ + { + "bbox": [ + 302, + 653, + 527, + 775 + ], + "type": "text", + "content": "Comparing the averaged length on Arena-Hard and AlpacaEval 2.0, we also notice that our method does not lead to more verbose responses. This demonstrates that 2-dimensional supervision helps mitigate the reward hacking issue (Singhal et al., 2023). While more fine-grained supervision is provided for preference pairs, the model would not unanimously favor more verbose responses, achieving more accurate alignment." + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 740, + 266, + 762 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 740, + 266, + 762 + ], + "spans": [ + { + "bbox": [ + 67, + 740, + 266, + 762 + ], + "type": "text", + "content": "3https://platform.openai.com/docs/models/gpt-4o" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 80, + 762, + 249, + 775 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 762, + 249, + 775 + ], + "spans": [ + { + "bbox": [ + 80, + 762, + 249, + 775 + ], + "type": "inline_equation", + "content": "^{4}" + }, + { + "bbox": [ + 80, + 762, + 249, + 775 + ], + "type": "text", + "content": "ORPO is conducted with SFT loss originally." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "text", + "content": "8169" + } + ] + } + ], + "index": 12 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 5 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 205 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 205 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 205 + ], + "type": "text", + "content": "In comparison to 2D-DPO, the performance of 1D-DPO, which is only scaled with segment-level signals, shows a noticeable decline, thereby demonstrating the gains achieved by incorporating aspect-level signals. Nevertheless, among various alignment methods, 1D-DPO remains highly competitive, outperforming all other methods except for 2D-DPO across different evaluation metrics, proving the effectiveness of the additional supervisory signals introduced at the segment level." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 206, + 291, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 206, + 291, + 354 + ], + "spans": [ + { + "bbox": [ + 67, + 206, + 291, + 354 + ], + "type": "text", + "content": "Furthermore, it deserves to be noted that TDPO also underperforms both 1D-DPO and 2D-DPO, which can be traced back to the design of the training objective. Despite the loss in TDPO being reassigned to each token based on KL-Divergence, the temperature for each token is not adjusted appropriately, resulting in a coarse optimization process. In contrast, our method meticulously adjusts the temperature for each segment, aligning the update scale with the segment's importance across criteria, contributing to our superior performance." 
+ } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 355, + 291, + 436 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 355, + 291, + 436 + ], + "spans": [ + { + "bbox": [ + 67, + 355, + 291, + 436 + ], + "type": "text", + "content": "We perform experiments on an SFT model that has not undergone alignment, where the advantage of 2D-DPO over other methods is more pronounced compared to its performance on instruct models. This may be because the improvement from additional alignment on instruct models is limited." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 445, + 177, + 458 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 445, + 177, + 458 + ], + "spans": [ + { + "bbox": [ + 67, + 445, + 177, + 458 + ], + "type": "text", + "content": "4.3 Detailed Analysis" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 462, + 291, + 516 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 462, + 291, + 516 + ], + "spans": [ + { + "bbox": [ + 67, + 462, + 291, + 516 + ], + "type": "text", + "content": "To further show the effectiveness of 2D-DPO, we conduct ablation studies and delve into a detailed analysis of the model's performance. Additionally, we present a case study in Appendix E." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "spans": [ + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "text", + "content": "The Influence of " + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "text", + "content": ". 
Table 2 shows the results of 2D-DPO with different values of " + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "text", + "content": " (temperature). As " + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "text", + "content": " increases, we observe a consistent trend across three benchmarks: performance first rises and then falls. This is because a higher " + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "text", + "content": " can amplify the divergence penalty within the RL optimization objective, thereby avoiding model degradation. However, an overly high " + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 523, + 291, + 659 + ], + "type": "text", + "content": " would reduce the overall optimal ceiling and limit the potential gains from alignment (Ahrabian et al., 2024)." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 666, + 291, + 746 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 666, + 291, + 746 + ], + "spans": [ + { + "bbox": [ + 67, + 666, + 291, + 746 + ], + "type": "text", + "content": "Performance on Different Aspects. We evaluated the performance of the models aligned using different methods across various aspects. 
We selected AlpacaEval 2.0 (Dubois et al., 2024) which offers diverse instructions as the query set, and obtained the responses of different models on this set" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 302, + 71, + 526, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 71, + 526, + 343 + ], + "spans": [ + { + "bbox": [ + 302, + 71, + 526, + 343 + ], + "type": "text", + "content": "as the evaluation targets. The evaluation prompt was consistent with the prompts presented in Section 3.1. For aspects that are independent among segments, we took the average score of all segments as the score for that response. For aspects that are not independent among segments (completeness and clarity), we select the score of the last segment. The average score of all responses is taken as the final result. As shown in Figure 3, our 2D-DPO can achieve the best results in all aspects, striking a balance between different rubrics of human preferences. 1D-DPO with only segment-level feedback underperforms, as response-level alignment still leads to coarse refinement. We also notice different methods exhibit minimal difference upon safety and correctness, which might be due to Qwen2-7B-Instruct already undergoing an alignment process on these aspects. For the other aspects that are not covered by the process, 2D-DPO can achieve more pronounced improvement." 
+ } + ] + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 316, + 358, + 514, + 539 + ], + "blocks": [ + { + "bbox": [ + 316, + 358, + 514, + 539 + ], + "lines": [ + { + "bbox": [ + 316, + 358, + 514, + 539 + ], + "spans": [ + { + "bbox": [ + 316, + 358, + 514, + 539 + ], + "type": "image", + "image_path": "239ee4fb9f5c6efd7008da3c2c12a4b0ddc5430395d5b867fa3d5881538fb1fa.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 547, + 525, + 571 + ], + "lines": [ + { + "bbox": [ + 302, + 547, + 525, + 571 + ], + "spans": [ + { + "bbox": [ + 302, + 547, + 525, + 571 + ], + "type": "text", + "content": "Figure 3: The relative performance on different aspects of different alignment methods." + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "image_caption" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 308, + 613, + 415, + 694 + ], + "blocks": [ + { + "bbox": [ + 308, + 613, + 415, + 694 + ], + "lines": [ + { + "bbox": [ + 308, + 613, + 415, + 694 + ], + "spans": [ + { + "bbox": [ + 308, + 613, + 415, + 694 + ], + "type": "image", + "image_path": "281f29df16f6b76a0cb880a5a75186479cf41d6602c47c9cc077412c0d59c0b8.jpg" + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 707, + 526, + 767 + ], + "lines": [ + { + "bbox": [ + 302, + 707, + 526, + 767 + ], + "spans": [ + { + "bbox": [ + 302, + 707, + 526, + 767 + ], + "type": "text", + "content": "Figure 4: The trends in reward scores and accuracy over training steps across DPO, TDPO, 1D-DPO, and 2D-DPO. (a) Rewards of preferred (solid lines) and dispreferred (dashed lines) responses. (b) Reward accuracy compared with preference annotation." 
+ } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_caption" + } + ], + "index": 10 + }, + { + "type": "image", + "bbox": [ + 417, + 613, + 521, + 694 + ], + "blocks": [ + { + "bbox": [ + 417, + 613, + 521, + 694 + ], + "lines": [ + { + "bbox": [ + 417, + 613, + 521, + 694 + ], + "spans": [ + { + "bbox": [ + 417, + 613, + 521, + 694 + ], + "type": "image", + "image_path": "234dc94cbe4873015d28ee946181e4beddc6bef7d48478c68cb86475b3bfca5f.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 67, + 752, + 254, + 773 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 752, + 254, + 773 + ], + "spans": [ + { + "bbox": [ + 67, + 752, + 254, + 773 + ], + "type": "inline_equation", + "content": "^{5}" + }, + { + "bbox": [ + 67, + 752, + 254, + 773 + ], + "type": "text", + "content": "https://huggingface.co/princeton-nlp/Llama-3-Base-8B-SFT" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "8170" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 6 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 100, + 68, + 496, + 158 + ], + "blocks": [ + { + "bbox": [ + 100, + 68, + 496, + 158 + ], + "lines": [ + { + "bbox": [ + 100, + 68, + 496, + 158 + ], + "spans": [ + { + "bbox": [ + 100, + 68, + 496, + 158 + ], + "type": "table", + "html": "
ModelβArena-HardAlpacaEval 2.0MT-Bench
WR (%)Avg. Len95% CILC (%)WR (%)Avg. LenTurn 1Turn 2Avg. Score
Qwen2-7B0.129.2557(-2.2, 1.8)30.4829.2119618.006.607.30
0.230.3586(-1.9, 2.4)31.5128.9419948.186.687.43
-Instruct0.527.4578(-1.8, 1.9)29.6229.2119767.956.607.28
+2D-DPO0.728.6595(-1.6, 2.2)28.2428.1719977.836.657.24
1.028.0576(-2.0, 1.4)28.9728.4919817.776.657.21
", + "image_path": "fb6c521f3830edc6f6cf5dd790ab9d7ca46e3f3f5ad6fa86064dce65d504c3ff.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 119, + 166, + 473, + 179 + ], + "lines": [ + { + "bbox": [ + 119, + 166, + 473, + 179 + ], + "spans": [ + { + "bbox": [ + 119, + 166, + 473, + 179 + ], + "type": "text", + "content": "Table 2: Experiment results on three benchmarks of 2D-DPO with different values of " + }, + { + "bbox": [ + 119, + 166, + 473, + 179 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 119, + 166, + 473, + 179 + ], + "type": "text", + "content": " ." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + }, + { + "type": "image", + "bbox": [ + 73, + 190, + 286, + 272 + ], + "blocks": [ + { + "bbox": [ + 73, + 190, + 286, + 272 + ], + "lines": [ + { + "bbox": [ + 73, + 190, + 286, + 272 + ], + "spans": [ + { + "bbox": [ + 73, + 190, + 286, + 272 + ], + "type": "image", + "image_path": "838d3aac90135b9dbb152cfb611084762f4ca42946f774121177ce4629189d22.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 67, + 284, + 291, + 344 + ], + "lines": [ + { + "bbox": [ + 67, + 284, + 291, + 344 + ], + "spans": [ + { + "bbox": [ + 67, + 284, + 291, + 344 + ], + "type": "text", + "content": "Figure 5: The trends in sequential KL divergence between the policy model and the reference model over training steps across DPO, TDPO, 1D-DPO, and 2D-DPO. (a) KL divergence for preferred responses. (b) KL divergence for dispreferred responses." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_caption" + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 354, + 291, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 354, + 291, + 666 + ], + "spans": [ + { + "bbox": [ + 67, + 354, + 291, + 666 + ], + "type": "text", + "content": "Training Indicators. 
We analyze the reward of different responses during training in Figure 4(a) " + }, + { + "bbox": [ + 67, + 354, + 291, + 666 + ], + "type": "inline_equation", + "content": "^{6}" + }, + { + "bbox": [ + 67, + 354, + 291, + 666 + ], + "type": "text", + "content": ". We can observe that the reward scores of the preferred responses in our method increase rapidly while the reward scores of the dispreferred responses decrease significantly, resulting in the largest margin. Figure 4(b) shows the reward accuracy trends during training. In this context, accuracy is defined as the proportion of instances where the reward score for the preferred response is higher than that for the dispreferred response. Our method not only reaches the highest accuracy fastest but also achieves the best overall accuracy, demonstrating that our method facilitates more efficient training. In Figure 5, we show the trends of sequential KL divergence over training steps for both preferred and dispreferred responses. 2D-DPO exhibits consistently lower KL divergence compared to DPO and 1D-DPO on both preferred and dispreferred responses. This indicates that 2-dimensional supervision can effectively balance KL divergence, preventing excessive deviations from the original model, thereby ensuring stronger training stability." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 675, + 291, + 743 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 675, + 291, + 743 + ], + "spans": [ + { + "bbox": [ + 67, + 675, + 291, + 743 + ], + "type": "text", + "content": "Fine-grained Reward Assignment. 
In Figure 6, we compare the fine-grained reward assignment for the same sample using various reward models: (a) 1D-RM utilizes only segment-level reward signals; (b) DPO is trained through direct prefer" + } + ] + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 305, + 188, + 525, + 295 + ], + "blocks": [ + { + "bbox": [ + 305, + 188, + 525, + 295 + ], + "lines": [ + { + "bbox": [ + 305, + 188, + 525, + 295 + ], + "spans": [ + { + "bbox": [ + 305, + 188, + 525, + 295 + ], + "type": "image", + "image_path": "7d55554b7d9197619815a975ab1c8203e046c454c7efc423726ecdee182f9b87.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 302, + 305, + 525, + 341 + ], + "lines": [ + { + "bbox": [ + 302, + 305, + 525, + 341 + ], + "spans": [ + { + "bbox": [ + 302, + 305, + 525, + 341 + ], + "type": "text", + "content": "Figure 6: The fine-grained reward assignment of different reward models on the same sample. All reward models are trained on HelpSteer-2D." + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_caption" + } + ], + "index": 6 + }, + { + "bbox": [ + 301, + 350, + 526, + 541 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 350, + 526, + 541 + ], + "spans": [ + { + "bbox": [ + 301, + 350, + 526, + 541 + ], + "type": "text", + "content": "ence alignment" + }, + { + "bbox": [ + 301, + 350, + 526, + 541 + ], + "type": "inline_equation", + "content": "^{7}" + }, + { + "bbox": [ + 301, + 350, + 526, + 541 + ], + "type": "text", + "content": "; (c) ORM is trained with response-level reward signals; (d) 2D-RM is trained with 2-dimensional supervision signals" + }, + { + "bbox": [ + 301, + 350, + 526, + 541 + ], + "type": "inline_equation", + "content": "^{8}" + }, + { + "bbox": [ + 301, + 350, + 526, + 541 + ], + "type": "text", + "content": ". 
As can be seen, 1) ORM fails to distinguish preference differences between segments, leading to inaccurate global scores; 2) 1D-RM identifies preferences for different segments but does not detect the redundancy issue in the second sentence; 3) DPO can identify certain preferred tokens, but also assigns high scores to dispreferred tokens and overlooks some preferred tokens, introducing significant noise. In contrast, our method not only distinguishes preferences across segments more effectively but also provides more accurate scoring." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 302, + 550, + 381, + 563 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 550, + 381, + 563 + ], + "spans": [ + { + "bbox": [ + 302, + 550, + 381, + 563 + ], + "type": "text", + "content": "5 Conclusion" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 301, + 571, + 525, + 666 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 301, + 571, + 525, + 666 + ], + "spans": [ + { + "bbox": [ + 301, + 571, + 525, + 666 + ], + "type": "text", + "content": "In this work, we introduce a novel approach to scale DPO with 2-dimensional reward signals. We first construct a preference dataset on both segment and aspect levels and then develop a 2D-DPO objective that learns the 2D preferences concurrently. Experiment results on popular benchmarks verified the effectiveness of our proposed method." 
+ } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 302, + 667, + 525, + 708 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 667, + 525, + 708 + ], + "spans": [ + { + "bbox": [ + 302, + 667, + 525, + 708 + ], + "type": "text", + "content": "While the boost of direct preference alignment methods has promoted LLM development and application, most work focuses on the design of loss" + } + ] + } + ], + "index": 11 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 302, + 714, + 525, + 743 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 714, + 525, + 743 + ], + "spans": [ + { + "bbox": [ + 302, + 714, + 525, + 743 + ], + "type": "text", + "content": "The reward of each token is computed as " + }, + { + "bbox": [ + 302, + 714, + 525, + 743 + ], + "type": "inline_equation", + "content": "r_{\\theta}\\left(x,y^{t}\\right) = \\beta \\log \\frac{\\pi_{\\theta}\\left(y^{t}|y^{< t},x\\right)}{\\pi_{\\mathrm{ref}}\\left(y^{t}|y^{< t},x\\right)}" + }, + { + "bbox": [ + 302, + 714, + 525, + 743 + ], + "type": "text", + "content": " consistent with Rafailov et al. (2024)." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 302, + 743, + 525, + 774 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 302, + 743, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 302, + 743, + 525, + 774 + ], + "type": "text", + "content": "The reward scores across different dimensions are normalized between 0 and 1 and then averaged as the final reward, to enable a more intuitive comparison." 
+ } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 751, + 290, + 778 + ], + "type": "page_footnote", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 751, + 290, + 778 + ], + "spans": [ + { + "bbox": [ + 67, + 751, + 290, + 778 + ], + "type": "text", + "content": "The reward score of each sample in all methods is defined as " + }, + { + "bbox": [ + 67, + 751, + 290, + 778 + ], + "type": "inline_equation", + "content": "\\beta \\log \\frac{\\pi_{\\theta}(y|x)}{\\pi_{ref}(y|x)}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 286, + 780, + 308, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 308, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 308, + 791 + ], + "type": "text", + "content": "8171" + } + ] + } + ], + "index": 16 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 7 + }, + { + "para_blocks": [ + { + "bbox": [ + 67, + 71, + 291, + 126 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 71, + 291, + 126 + ], + "spans": [ + { + "bbox": [ + 67, + 71, + 291, + 126 + ], + "type": "text", + "content": "function instead of the intricacies of human preferences. In the future, we will continue our research on multi-dimensional feedback, aimed at optimally aligned preference optimization." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 134, + 149, + 147 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 134, + 149, + 147 + ], + "spans": [ + { + "bbox": [ + 67, + 134, + 149, + 147 + ], + "type": "text", + "content": "6 Limitations" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 156, + 291, + 333 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 156, + 291, + 333 + ], + "spans": [ + { + "bbox": [ + 67, + 156, + 291, + 333 + ], + "type": "text", + "content": "Our work still has some limitations: 1) Due to the lack of open-source codes and time limitations, we only compare with one 1-dimensional DPO method. More comparisons should be made on related work to improve the credibility of our work. 2) Our method should also be validated on foundation ability benchmarks such as MMLU (Hendrycks et al., 2021), to verify that our method would not lead to the degradation of fundamental abilities. 3) Due to resource limitations, the effectiveness of our method is only verified on 7B-sized models. The scaling ability on larger models deserves our future exploration to promote its application." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 354, + 127, + 366 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 354, + 127, + 366 + ], + "spans": [ + { + "bbox": [ + 69, + 354, + 127, + 366 + ], + "type": "text", + "content": "References" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 372, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 69, + 372, + 291, + 417 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 372, + 291, + 417 + ], + "spans": [ + { + "bbox": [ + 69, + 372, + 291, + 417 + ], + "type": "text", + "content": "Kian Ahrabian, Xihui Lin, Barun Patra, Vishrav Chaudhary, Alon Benhaim, Jay Pujara, and Xia Song. 2024. The hitchhiker's guide to human alignment with* po. 
arXiv preprint arXiv:2407.15229." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 425, + 224, + 436 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 425, + 224, + 436 + ], + "spans": [ + { + "bbox": [ + 69, + 425, + 224, + 436 + ], + "type": "text", + "content": "AI@Meta. 2024. Llama 3 model card." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 444, + 291, + 521 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 444, + 291, + 521 + ], + "spans": [ + { + "bbox": [ + 69, + 444, + 291, + 521 + ], + "type": "text", + "content": "Mohammad Gheshlaghi Azar, Zhaohan Daniel Guo, Bilal Piot, Remi Munos, Mark Rowland, Michal Valko, and Daniele Calandriello. 2024. A general theoretical paradigm to understand learning from human preferences. In International Conference on Artificial Intelligence and Statistics, pages 4447-4455. PMLR." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 529, + 291, + 595 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 529, + 291, + 595 + ], + "spans": [ + { + "bbox": [ + 69, + 529, + 291, + 595 + ], + "type": "text", + "content": "Ge Bai, Jie Liu, Xingyuan Bu, Yancheng He, Jiaheng Liu, Zhanhui Zhou, Zhuoran Lin, Wenbo Su, Tiezheng Ge, Bo Zheng, et al. 2024. Mt-bench-101: A fine-grained benchmark for evaluating large language models in multi-turn dialogues. arXiv preprint arXiv:2402.14762." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 603, + 291, + 670 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 603, + 291, + 670 + ], + "spans": [ + { + "bbox": [ + 69, + 603, + 291, + 670 + ], + "type": "text", + "content": "Yuntao Bai, Andy Jones, Kamal Ndousse, Amanda Askell, Anna Chen, Nova DasSarma, Dawn Drain, Stanislav Fort, Deep Ganguli, Tom Henighan, et al. 2022. Training a helpful and harmless assistant with reinforcement learning from human feedback. arXiv preprint arXiv:2204.05862." 
+ } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 677, + 291, + 722 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 677, + 291, + 722 + ], + "spans": [ + { + "bbox": [ + 69, + 677, + 291, + 722 + ], + "type": "text", + "content": "Meng Cao, Lei Shu, Lei Yu, Yun Zhu, Nevan Wichers, Yinxiao Liu, and Lei Meng. 2024. Drlc: Reinforcement learning with dense rewards from llm critic. arXiv preprint arXiv:2401.07382." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 69, + 729, + 291, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 729, + 291, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 729, + 291, + 774 + ], + "type": "text", + "content": "Alex J Chan, Hao Sun, Samuel Holt, and Mihaela van der Schaar. 2024. Dense reward for free in reinforcement learning from human feedback. arXiv preprint arXiv:2402.00782." + } + ] + } + ], + "index": 10 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 526, + 774 + ], + "type": "list", + "angle": 0, + "index": 24, + "blocks": [ + { + "bbox": [ + 304, + 72, + 526, + 128 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 526, + 128 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 526, + 128 + ], + "type": "text", + "content": "Zhipeng Chen, Kun Zhou, Wayne Xin Zhao, Junchen Wan, Fuzheng Zhang, Di Zhang, and Ji-Rong Wen. 2024. Improving large language models via fine-grained reinforcement learning with minimum editing constraint. arXiv preprint arXiv:2401.06081." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 137, + 526, + 183 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 137, + 526, + 183 + ], + "spans": [ + { + "bbox": [ + 304, + 137, + 526, + 183 + ], + "type": "text", + "content": "Karin de Langis, Ryan Koo, and Dongyeop Kang. 2024. Reinforcement learning with dynamic multi-reward weighting for multi-style controllable generation. 
arXiv preprint arXiv:2402.14146." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 192, + 526, + 238 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 192, + 526, + 238 + ], + "spans": [ + { + "bbox": [ + 304, + 192, + 526, + 238 + ], + "type": "text", + "content": "Yann Dubois, Balázs Galambosi, Percy Liang, and Tatsunori B Hashimoto. 2024. Length-controlled alpacaeval: A simple way to debias automatic evaluators. arXiv preprint arXiv:2404.04475." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 248, + 526, + 292 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 248, + 526, + 292 + ], + "spans": [ + { + "bbox": [ + 304, + 248, + 526, + 292 + ], + "type": "text", + "content": "Kawin Ethayarajh, Winnie Xu, Niklas Muennighoff, Dan Jurafsky, and Douwe Kiela. 2024. Kto: Model alignment as prospect theoretic optimization. arXiv preprint arXiv:2402.01306." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 302, + 526, + 347 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 302, + 526, + 347 + ], + "spans": [ + { + "bbox": [ + 304, + 302, + 526, + 347 + ], + "type": "text", + "content": "Weixin Feng, Xingyuan Bu, Chenchen Zhang, and Xubin Li. 2022. Beyond bounding box: Multi-modal knowledge learning for object detection. arXiv preprint arXiv:2205.04072." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 356, + 526, + 401 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 356, + 526, + 401 + ], + "spans": [ + { + "bbox": [ + 304, + 356, + 526, + 401 + ], + "type": "text", + "content": "Geyang Guo, Ranchi Zhao, Tianyi Tang, Wayne Xin Zhao, and Ji-Rong Wen. 2023. Beyond imitation: Leveraging fine-grained quality signals for alignment. arXiv preprint arXiv:2311.04072." 
+ } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 412, + 526, + 477 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 412, + 526, + 477 + ], + "spans": [ + { + "bbox": [ + 304, + 412, + 526, + 477 + ], + "type": "text", + "content": "Yiju Guo, Ganqu Cui, Lifan Yuan, Ning Ding, Jiexin Wang, Huimin Chen, Bowen Sun, Ruobing Xie, Jie Zhou, Yankai Lin, et al. 2024. Controllable preference optimization: Toward controllable multi-objective alignment. arXiv preprint arXiv:2402.19085." + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 488, + 526, + 533 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 488, + 526, + 533 + ], + "spans": [ + { + "bbox": [ + 304, + 488, + 526, + 533 + ], + "type": "text", + "content": "Dan Hendrycks, Collin Burns, Steven Basart, Andy Zou, Mantas Mazeika, Dawn Song, and Jacob Steinhardt. 2021. Measuring massive multitask language understanding. Preprint, arXiv:2009.03300." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 543, + 526, + 577 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 543, + 526, + 577 + ], + "spans": [ + { + "bbox": [ + 304, + 543, + 526, + 577 + ], + "type": "text", + "content": "Jiwoo Hong, Noah Lee, and James Thorne. 2024. Reference-free monolithic preference optimization with odds ratio. arXiv preprint arXiv:2403.07691." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 587, + 526, + 654 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 587, + 526, + 654 + ], + "spans": [ + { + "bbox": [ + 304, + 587, + 526, + 654 + ], + "type": "text", + "content": "Jiaming Ji, Mickel Liu, Josef Dai, Xuehai Pan, Chi Zhang, Ce Bian, Boyuan Chen, Ruiyang Sun, Yizhou Wang, and Yaodong Yang. 2024. Beavertails: Towards improved safety alignment of llm via a human-preference dataset. Advances in Neural Information Processing Systems, 36." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 304, + 664, + 526, + 729 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 664, + 526, + 729 + ], + "spans": [ + { + "bbox": [ + 304, + 664, + 526, + 729 + ], + "type": "text", + "content": "Yuxin Jiang, Bo Huang, Yufei Wang, Xingshan Zeng, Liangyou Li, Yasheng Wang, Xin Jiang, Lifeng Shang, Ruiming Tang, and Wei Wang. 2024. Bridging and modeling correlations in pairwise data for direct preference optimization. arXiv preprint arXiv:2408.07471." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 304, + 740, + 526, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 740, + 526, + 774 + ], + "spans": [ + { + "bbox": [ + 304, + 740, + 526, + 774 + ], + "type": "text", + "content": "Sergey Levine. 2018. Reinforcement learning and control as probabilistic inference: Tutorial and review. ArXiv, abs/1805.00909." + } + ] + } + ], + "index": 23 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "8172" + } + ] + } + ], + "index": 25 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 8 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 773 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 69, + 72, + 289, + 137 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 289, + 137 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 289, + 137 + ], + "type": "text", + "content": "Shilong Li, Yancheng He, Hangyu Guo, Xingyuan Bu, Ge Bai, Jie Liu, Jiaheng Liu, Xingwei Qu, Yangguang Li, Wanli Ouyang, et al. 2024a. Graphreader: Building graph-based agent to enhance long-context abilities of large language models. 
arXiv preprint arXiv:2406.14550." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 149, + 289, + 204 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 149, + 289, + 204 + ], + "spans": [ + { + "bbox": [ + 69, + 149, + 289, + 204 + ], + "type": "text", + "content": "Tianle Li, Wei-Lin Chiang, Evan Frick, Lisa Dunlap, Tianhao Wu, Banghua Zhu, Joseph E Gonzalez, and Ion Stoica. 2024b. From crowdsourced data to high-quality benchmarks: Arena-hard and benchbuilder pipeline. arXiv preprint arXiv:2406.11939." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 215, + 289, + 269 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 215, + 289, + 269 + ], + "spans": [ + { + "bbox": [ + 69, + 215, + 289, + 269 + ], + "type": "text", + "content": "Hunter Lightman, Vineet Kosaraju, Yura Burda, Harri Edwards, Bowen Baker, Teddy Lee, Jan Leike, John Schulman, Ilya Sutskever, and Karl Cobbe. 2023. Let's verify step by step. arXiv preprint arXiv:2305.20050." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 280, + 289, + 367 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 280, + 289, + 367 + ], + "spans": [ + { + "bbox": [ + 69, + 280, + 289, + 367 + ], + "type": "text", + "content": "Jianyu Liu, Hangyu Guo, Ranjie Duan, Xingyuan Bu, Yancheng He, Shilong Li, Hui Huang, Jiaheng Liu, Yingshui Tan, Yucheng Wang, Chenchen Jing, Xingwei Qu, Xiao Zhang, Pei Wang, Yanan Wu, Jihao Gu, Yangguang Li, and Jianke Zhu. 2024a. Dream: Disentangling risks to enhance safety alignment in multimodal large language models. arXiv preprint arXiv." 
+ } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 380, + 289, + 433 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 380, + 289, + 433 + ], + "spans": [ + { + "bbox": [ + 69, + 380, + 289, + 433 + ], + "type": "text", + "content": "Jie Liu, Zhanhui Zhou, Jiaheng Liu, Xingyuan Bu, Chao Yang, Han-Sen Zhong, and Wanli Ouyang. 2024b. Iterative length-regularized direct preference optimization: A case study on improving 7b language models to gpt-4 level. arXiv preprint arXiv:2406.11817." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 444, + 289, + 488 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 444, + 289, + 488 + ], + "spans": [ + { + "bbox": [ + 69, + 444, + 289, + 488 + ], + "type": "text", + "content": "Yu Meng, Mengzhou Xia, and Danqi Chen. 2024. Simpo: Simple preference optimization with a reference-free reward. arXiv preprint arXiv:2405.14734." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 500, + 289, + 544 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 500, + 289, + 544 + ], + "spans": [ + { + "bbox": [ + 69, + 500, + 289, + 544 + ], + "type": "text", + "content": "A. Ng, Daishi Harada, and Stuart J. Russell. 1999. Policy invariance under reward transformations: Theory and application to reward shaping. In International Conference on Machine Learning." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 555, + 289, + 620 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 555, + 289, + 620 + ], + "spans": [ + { + "bbox": [ + 69, + 555, + 289, + 620 + ], + "type": "text", + "content": "Long Ouyang, Jeffrey Wu, Xu Jiang, Diogo Almeida, Carroll Wainwright, Pamela Mishkin, Chong Zhang, Sandhini Agarwal, Katarina Slama, Alex Ray, et al. 2022. Training language models to follow instructions with human feedback. Advances in neural information processing systems, 35:27730-27744." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 631, + 289, + 708 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 631, + 289, + 708 + ], + "spans": [ + { + "bbox": [ + 69, + 631, + 289, + 708 + ], + "type": "text", + "content": "Alexander Pan, Jun Shern Chan, Andy Zou, Nathaniel Li, Steven Basart, Thomas Woodside, Hanlin Zhang, Scott Emmons, and Dan Hendrycks. 2023. Do the rewards justify the means? measuring trade-offs between rewards and ethical behavior in the machiavelli benchmark. In International Conference on Machine Learning, pages 26837-26867. PMLR." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 69, + 719, + 289, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 719, + 289, + 773 + ], + "spans": [ + { + "bbox": [ + 69, + 719, + 289, + 773 + ], + "type": "text", + "content": "Junran Peng, Qing Chang, Haoran Yin, Xingyuan Bu, Ji-jun Sun, Lingxi Xie, Xiaopeng Zhang, Qi Tian, and Zhaoxiang Zhang. 2023. Gaia-universe: Everything is super-netify. IEEE Transactions on Pattern Analysis and Machine Intelligence, 45(10):11856-11868." + } + ] + } + ], + "index": 9 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 525, + 773 + ], + "type": "list", + "angle": 0, + "index": 22, + "blocks": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "spans": [ + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "text", + "content": "Rafael Rafailov, Joey Hejna, Ryan Park, and Chelsea Finn. 2024. 
From " + }, + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "text", + "content": " to " + }, + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "inline_equation", + "content": "q^*" + }, + { + "bbox": [ + 304, + 72, + 524, + 116 + ], + "type": "text", + "content": ": Your language model is secretly a q-function. arXiv preprint arXiv:2404.12358." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 125, + 525, + 190 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 125, + 525, + 190 + ], + "spans": [ + { + "bbox": [ + 304, + 125, + 525, + 190 + ], + "type": "text", + "content": "Rafael Rafailov, Archit Sharma, Eric Mitchell, Christopher D Manning, Stefano Ermon, and Chelsea Finn. 2023. Direct preference optimization: Your language model is secretly a reward model. In Thirty-seventh Conference on Neural Information Processing Systems." + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 304, + 200, + 525, + 275 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 200, + 525, + 275 + ], + "spans": [ + { + "bbox": [ + 304, + 200, + 525, + 275 + ], + "type": "text", + "content": "Alexandre Rame, Guillaume Couairon, Corentin Dancette, Jean-Baptiste Gaya, Mustafa Shukor, Laure Soulier, and Matthieu Cord. 2024. Rewarded soups: towards pareto-optimal alignment by interpolating weights fine-tuned on diverse rewards. Advances in Neural Information Processing Systems, 36." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 304, + 285, + 525, + 328 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 285, + 525, + 328 + ], + "spans": [ + { + "bbox": [ + 304, + 285, + 525, + 328 + ], + "type": "text", + "content": "John Schulman, Filip Wolski, Prafulla Dhariwal, Alec Radford, and Oleg Klimov. 2017. Proximal policy optimization algorithms. arXiv preprint arXiv:1707.06347." 
+ } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 304, + 338, + 525, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 338, + 525, + 380 + ], + "spans": [ + { + "bbox": [ + 304, + 338, + 525, + 380 + ], + "type": "text", + "content": "Prasann Singhal, Tanya Goyal, Jiacheng Xu, and Greg Durrett. 2023. A long way to go: Investigating length correlations in rlhf. arXiv preprint arXiv:2310.03716." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 304, + 390, + 525, + 423 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 390, + 525, + 423 + ], + "spans": [ + { + "bbox": [ + 304, + 390, + 525, + 423 + ], + "type": "text", + "content": "Ziang Song, Tianle Cai, Jason D Lee, and Weijie J Su. 2023. Reward collapse in aligning large language models. arXiv preprint arXiv:2305.17608." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 304, + 432, + 525, + 497 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 432, + 525, + 497 + ], + "spans": [ + { + "bbox": [ + 304, + 432, + 525, + 497 + ], + "type": "text", + "content": "Nisan Stiannon, Long Ouyang, Jeffrey Wu, Daniel Ziegler, Ryan Lowe, Chelsea Voss, Alec Radford, Dario Amodei, and Paul F Christiano. 2020. Learning to summarize with human feedback. Advances in Neural Information Processing Systems, 33:3008-3021." + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 304, + 507, + 525, + 571 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 507, + 525, + 571 + ], + "spans": [ + { + "bbox": [ + 304, + 507, + 525, + 571 + ], + "type": "text", + "content": "Haoxiang Wang, Yong Lin, Wei Xiong, Rui Yang, Shizhe Diao, Shuang Qiu, Han Zhao, and Tong Zhang. 2024a. Arithmetic control of llms for diverse user preferences: Directional preference alignment with multi-objective rewards. arXiv preprint arXiv:2402.18571." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 304, + 581, + 525, + 645 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 581, + 525, + 645 + ], + "spans": [ + { + "bbox": [ + 304, + 581, + 525, + 645 + ], + "type": "text", + "content": "Zhilin Wang, Yi Dong, Olivier Delalleau, Jiaqi Zeng, Gerald Shen, Daniel Egert, Jimmy J. Zhang, Makesh Narsimhan Sreedhar, and Oleksii Kuchaiev. 2024b. Helpsteer2: Open-source dataset for training top-performing reward models. Preprint, arXiv:2406.08673." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 304, + 655, + 525, + 699 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 655, + 525, + 699 + ], + "spans": [ + { + "bbox": [ + 304, + 655, + 525, + 699 + ], + "type": "text", + "content": "Yotam Wolf, Noam Wies, Oshri Avnery, Yoav Levine, and Amnon Shashua. 2023. Fundamental limitations of alignment in large language models. arXiv preprint arXiv:2304.11082." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 304, + 708, + 525, + 773 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 708, + 525, + 773 + ], + "spans": [ + { + "bbox": [ + 304, + 708, + 525, + 773 + ], + "type": "text", + "content": "Yanan Wu, Jie Liu, Xingyuan Bu, Jiaheng Liu, Zhanhui Zhou, Yuanxing Zhang, Chenchen Zhang, Zhiqi Bai, Haibin Chen, Tiezheng Ge, et al. 2024a. Conceptmath: A bilingual concept-wise benchmark for measuring mathematical reasoning of large language models. arXiv preprint arXiv:2402.14660." 
+ } + ] + } + ], + "index": 21 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "8173" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 9 + }, + { + "para_blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 774 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "spans": [ + { + "bbox": [ + 69, + 72, + 291, + 139 + ], + "type": "text", + "content": "Zeqiu Wu, Yushi Hu, Weijia Shi, Nouha Dziri, Alane Suhr, Prithviraj Ammanabrolu, Noah A Smith, Mari Ostendorf, and Hannaneh Hajishirzi. 2024b. Finegrained human feedback gives better rewards for language model training. Advances in Neural Information Processing Systems, 36." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 69, + 149, + 291, + 324 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 149, + 291, + 324 + ], + "spans": [ + { + "bbox": [ + 69, + 149, + 291, + 324 + ], + "type": "text", + "content": "An Yang, Baosong Yang, Binyuan Hui, Bo Zheng, Bowen Yu, Chang Zhou, Chengpeng Li, Chengyuan Li, Dayiheng Liu, Fei Huang, Guanting Dong, Hao ran Wei, Huan Lin, Jialong Tang, Jialin Wang, Jian Yang, Jianhong Tu, Jianwei Zhang, Jianxin Ma, Jin Xu, Jingren Zhou, Jinze Bai, Jinzheng He, Junyang Lin, Kai Dang, Keming Lu, Keqin Chen, Kexin Yang, Mei Li, Mingfeng Xue, Na Ni, Pei Zhang, Peng Wang, Ru Peng, Rui Men, Ruize Gao, Runji Lin, Shijie Wang, Shuai Bai, Sinan Tan, Tianhang Zhu, Tianhao Li, Tianyu Liu, Wenbin Ge, Xiaodong Deng, Xiaohuan Zhou, Xingzhang Ren, Xinyu Zhang, Xipin Wei, Xuancheng Ren, Yang Fan, Yang Yao, Yichang Zhang, Yu Wan, Yunfei Chu, Yuqiong Liu, Zeyu Cui, Zhenru Zhang, and Zhihao Fan. 2024a. Qwen2 technical report. arXiv preprint arXiv:2407.10671." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 69, + 335, + 290, + 380 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 335, + 290, + 380 + ], + "spans": [ + { + "bbox": [ + 69, + 335, + 290, + 380 + ], + "type": "text", + "content": "Kailai Yang, Zhiwei Liu, Qianqian Xie, Jimin Huang, Erxue Min, and Sophia Ananiadou. 2024b. Selective preference optimization via token-level reward function estimation. arXiv preprint arXiv:2408.13518." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 69, + 390, + 290, + 445 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 390, + 290, + 445 + ], + "spans": [ + { + "bbox": [ + 69, + 390, + 290, + 445 + ], + "type": "text", + "content": "Rui Yang, Xiaoman Pan, Feng Luo, Shuang Qiu, Han Zhong, Dong Yu, and Jianshu Chen. 2024c. Rewards-in-context: Multi-objective alignment of foundation models with dynamic preference adjustment. 
arXiv preprint arXiv:2402.10207." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 69, + 455, + 291, + 532 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 455, + 291, + 532 + ], + "spans": [ + { + "bbox": [ + 69, + 455, + 291, + 532 + ], + "type": "text", + "content": "Eunseop Yoon, Hee Suk Yoon, SooHwan Eom, Gunsoo Han, Daniel Wontae Nam, Daejin Jo, Kyoung-Woon On, Mark A Hasegawa-Johnson, Sungwoong Kim, and Chang D Yoo. 2024. Tlcr: Token-level continuous reward for fine-grained reinforcement learning from human feedback. arXiv preprint arXiv:2407.16574." + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 69, + 544, + 291, + 587 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 544, + 291, + 587 + ], + "spans": [ + { + "bbox": [ + 69, + 544, + 291, + 587 + ], + "type": "text", + "content": "Yongcheng Zeng, Guoqing Liu, Weiyu Ma, Ning Yang, Haifeng Zhang, and Jun Wang. 2024. Token-level direct preference optimization. arXiv preprint arXiv:2404.11999." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 69, + 598, + 291, + 652 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 598, + 291, + 652 + ], + "spans": [ + { + "bbox": [ + 69, + 598, + 291, + 652 + ], + "type": "text", + "content": "Wayne Xin Zhao, Kun Zhou, Junyi Li, Tianyi Tang, Xiaolei Wang, Yupeng Hou, Yingqian Min, Beichen Zhang, Junjie Zhang, Zican Dong, et al. 2023. A survey of large language models. arXiv preprint arXiv:2303.18223." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 69, + 664, + 291, + 741 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 664, + 291, + 741 + ], + "spans": [ + { + "bbox": [ + 69, + 664, + 291, + 741 + ], + "type": "text", + "content": "Lianmin Zheng, Wei-Lin Chiang, Ying Sheng, Siyuan Zhuang, Zhanghao Wu, Yonghao Zhuang, Zi Lin, Zhuohan Li, Dacheng Li, Eric Xing, Hao Zhang, Joseph E. Gonzalez, and Ion Stoica. 2023. 
Judging LLM-as-a-judge with MT-bench and chatbot arena. In Thirty-seventh Conference on Neural Information Processing Systems Datasets and Benchmarks Track." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 69, + 751, + 290, + 774 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 69, + 751, + 290, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 751, + 290, + 774 + ], + "type": "text", + "content": "Zhanhui Zhou, Jie Liu, Jing Shao, Xiangyu Yue, Chao Yang, Wanli Ouyang, and Yu Qiao. 2024. Beyond" + } + ] + } + ], + "index": 8 + } + ], + "sub_type": "ref_text" + }, + { + "bbox": [ + 304, + 72, + 526, + 222 + ], + "type": "list", + "angle": 0, + "index": 13, + "blocks": [ + { + "bbox": [ + 314, + 72, + 526, + 117 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 314, + 72, + 526, + 117 + ], + "spans": [ + { + "bbox": [ + 314, + 72, + 526, + 117 + ], + "type": "text", + "content": "one-preference-fits-all alignment: Multi-objective direct preference optimization. In Findings of the Association for Computational Linguistics ACL 2024, pages 10586-10613." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 304, + 125, + 526, + 159 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 125, + 526, + 159 + ], + "spans": [ + { + "bbox": [ + 304, + 125, + 526, + 159 + ], + "type": "text", + "content": "Brian D. Ziebart. 2010. Modeling purposeful adaptive behavior with the principle of maximum causal entropy." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 304, + 166, + 526, + 222 + ], + "type": "ref_text", + "angle": 0, + "lines": [ + { + "bbox": [ + 304, + 166, + 526, + 222 + ], + "spans": [ + { + "bbox": [ + 304, + 166, + 526, + 222 + ], + "type": "text", + "content": "Daniel M Ziegler, Nisan Stiennon, Jeffrey Wu, Tom B Brown, Alec Radford, Dario Amodei, Paul Christiano, and Geoffrey Irving. 2019. Fine-tuning language models from human preferences. arXiv preprint arXiv:1909.08593." 
+ } + ] + } + ], + "index": 12 + } + ], + "sub_type": "ref_text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "8174" + } + ] + } + ], + "index": 14 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 10 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 225, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 225, + 83 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 225, + 83 + ], + "type": "text", + "content": "A Mathematical Derivations" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 68, + 92, + 162, + 105 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 92, + 162, + 105 + ], + "spans": [ + { + "bbox": [ + 68, + 92, + 162, + 105 + ], + "type": "text", + "content": "A.1 Preliminaries" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 67, + 110, + 524, + 136 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 110, + 524, + 136 + ], + "spans": [ + { + "bbox": [ + 67, + 110, + 524, + 136 + ], + "type": "text", + "content": "In the most classic RLHF methods, the optimization goal is typically expressed as an entropy bonus using the following KL-constrained:" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 126, + 145, + 526, + 258 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 126, + 145, + 526, + 258 + ], + "spans": [ + { + "bbox": [ + 126, + 145, + 526, + 258 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} \\max _ {\\pi_ {\\theta}} \\mathbb {E} _ {a _ {t} \\sim \\pi_ {\\theta} (\\cdot | \\mathbf {s} _ {t})} \\sum_ {t = 0} ^ {T} [ r (\\mathbf {s} _ {t}, \\mathbf {a} _ {t}) - \\beta \\mathcal {D} _ {K L} [ \\pi_ {\\theta} (\\mathbf {a} _ {t} | \\mathbf {s} _ {t}) | | \\pi_ {r e f} 
(\\mathbf {a} _ {t} | \\mathbf {s} _ {t}) ] ] (7) \\\\ = \\max _ {\\pi_ {\\theta}} \\mathbb {E} _ {a _ {t} \\sim \\pi_ {\\theta} (\\cdot | \\mathbf {s} _ {t})} \\sum_ {t = 0} ^ {T} \\left[ r \\left(\\mathbf {s} _ {t}, \\mathbf {a} _ {t}\\right) - \\beta \\log \\frac {\\pi_ {\\theta} \\left(\\mathbf {a} _ {t} \\mid \\mathbf {s} _ {t}\\right)}{\\pi_ {r e f} \\left(\\mathbf {a} _ {t} \\mid \\mathbf {s} _ {t}\\right)} \\right] (8) \\\\ = \\max _ {\\pi_ {\\theta}} \\mathbb {E} _ {a _ {t} \\sim \\pi_ {\\theta} (\\cdot | \\mathbf {s} _ {t})} [ \\sum_ {t = 0} ^ {T} (r (\\mathbf {s} _ {t}, \\mathbf {a} _ {t}) + \\beta \\log \\pi_ {r e f} (\\mathbf {a} _ {t} | \\mathbf {s} _ {t})) + \\beta \\mathcal {H} (\\pi_ {\\theta}) | \\mathbf {s} _ {0} \\sim \\rho (\\mathbf {s} _ {0}) ] (9) \\\\ \\end{array}", + "image_path": "650bd7a7ae35ad4c427c0a55ab6862a8214264c155dd14262345d9ab4749d1d0.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 265, + 514, + 279 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 265, + 514, + 279 + ], + "spans": [ + { + "bbox": [ + 67, + 265, + 514, + 279 + ], + "type": "text", + "content": "The fixed point solution in the general maximum entropy RL setting is (Ziebart, 2010; Levine, 2018):" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 219, + 285, + 525, + 303 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 219, + 285, + 525, + 303 + ], + "spans": [ + { + "bbox": [ + 219, + 285, + 525, + 303 + ], + "type": "interline_equation", + "content": "\\pi^ {*} \\left(\\mathbf {a} _ {t} \\mid \\mathbf {s} _ {t}\\right) = e ^ {\\left(Q ^ {*} \\left(\\mathbf {s} _ {t}, \\mathbf {a} _ {t}\\right) - V ^ {*} \\left(\\mathbf {s} _ {t}\\right)\\right) / \\beta} \\tag {10}", + "image_path": "c1a045939c8149066d5dcb4b6a9140c6aeb4d258b976728fa3de61ca69db3fe5.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 78, + 312, + 345, + 325 + ], + "type": "text", + "angle": 0, + "lines": [ + { + 
"bbox": [ + 78, + 312, + 345, + 325 + ], + "spans": [ + { + "bbox": [ + 78, + 312, + 345, + 325 + ], + "type": "text", + "content": "The Bradley-Terry preference model in token-level MDP is:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 146, + 333, + 525, + 379 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 146, + 333, + 525, + 379 + ], + "spans": [ + { + "bbox": [ + 146, + 333, + 525, + 379 + ], + "type": "interline_equation", + "content": "p ^ {*} \\left(\\tau^ {w} \\succeq \\tau^ {l}\\right) = \\frac {\\exp \\left(\\sum_ {i = 1} ^ {N} r \\left(\\mathbf {s} _ {i} ^ {w} , \\mathbf {a} _ {i} ^ {w}\\right)\\right)}{\\exp \\left(\\sum_ {i = 1} ^ {N} r \\left(\\mathbf {s} _ {i} ^ {w} , \\mathbf {a} _ {i} ^ {w}\\right)\\right) + \\exp \\left(\\sum_ {i = 1} ^ {M} r \\left(\\mathbf {s} _ {i} ^ {l} , \\mathbf {a} _ {i} ^ {l}\\right)\\right)} \\tag {11}", + "image_path": "d2f5d8d2ad7c23f271f7e2692a96aa179aad78cea42fa43e3cb89ab3a713ac2c.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 68, + 386, + 238, + 398 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 386, + 238, + 398 + ], + "spans": [ + { + "bbox": [ + 68, + 386, + 238, + 398 + ], + "type": "text", + "content": "A.2 DPO in the Token Level MDP" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 404, + 525, + 431 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 404, + 525, + 431 + ], + "spans": [ + { + "bbox": [ + 67, + 404, + 525, + 431 + ], + "type": "text", + "content": "The formula using the " + }, + { + "bbox": [ + 67, + 404, + 525, + 431 + ], + "type": "inline_equation", + "content": "Q" + }, + { + "bbox": [ + 67, + 404, + 525, + 431 + ], + "type": "text", + "content": "-function to measure the relationship between the current timestep and future returns (Rafailov et al., 2024):" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 116, + 439, + 525, + 475 + ], + "type": "interline_equation", + "angle": 
0, + "lines": [ + { + "bbox": [ + 116, + 439, + 525, + 475 + ], + "spans": [ + { + "bbox": [ + 116, + 439, + 525, + 475 + ], + "type": "interline_equation", + "content": "Q ^ {*} \\left(s _ {t}, a _ {t}\\right) = \\left\\{ \\begin{array}{l l} r \\left(s _ {t}, a _ {t}\\right) + \\beta \\log \\pi_ {r e f} \\left(a _ {t} \\mid s _ {t}\\right) + V ^ {*} \\left(s _ {t + 1}\\right), & \\text {i f} s _ {t + 1} \\text {i s n o t t e r m i n a l} \\\\ r \\left(s _ {t}, a _ {t}\\right) + \\beta \\log \\pi_ {r e f} \\left(a _ {t} \\mid s _ {t}\\right), & \\text {i f} s _ {t + 1} \\text {i s t e r m i n a l} \\end{array} \\right. \\tag {12}", + "image_path": "0256c7cde8d79663c18f5e63cab44a58d700c9ef4e8a62f0cc6f9dd567c7ce3e.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 481, + 464, + 495 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 481, + 464, + 495 + ], + "spans": [ + { + "bbox": [ + 67, + 481, + 464, + 495 + ], + "type": "text", + "content": "Derive the total reward obtained along the entire trajectory based on the above definitions:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 155, + 503, + 526, + 539 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 155, + 503, + 526, + 539 + ], + "spans": [ + { + "bbox": [ + 155, + 503, + 526, + 539 + ], + "type": "interline_equation", + "content": "\\sum_ {t = 0} ^ {T - 1} r \\left(s _ {t}, a _ {t}\\right) = \\sum_ {t = 0} ^ {T - 1} \\left(Q ^ {*} \\left(s _ {t}, a _ {t}\\right) - \\beta \\log \\pi_ {\\text {r e f}} \\left(a _ {t} \\mid s _ {t}\\right) - V ^ {*} \\left(s _ {t + 1}\\right)\\right) \\tag {13}", + "image_path": "75467a34341f4eca564c662a39a7fa91884ce3b5cc0c4dcd242dd585d7b325dc.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 548, + 525, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 548, + 525, + 575 + ], + "spans": [ + { + "bbox": [ + 67, + 548, + 525, + 575 + ], + "type": "text", + 
"content": "According to the definition of Eq. 24, we know that " + }, + { + "bbox": [ + 67, + 548, + 525, + 575 + ], + "type": "inline_equation", + "content": "V^{*}(S_{T}) = 0" + }, + { + "bbox": [ + 67, + 548, + 525, + 575 + ], + "type": "text", + "content": ". Combining this with the fixed point solution of the optimal policy (Eq. 10), we can further derive:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 116, + 584, + 526, + 697 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 116, + 584, + 526, + 697 + ], + "spans": [ + { + "bbox": [ + 116, + 584, + 526, + 697 + ], + "type": "interline_equation", + "content": "\\begin{array}{l} = Q ^ {*} \\left(s _ {0}, a _ {0}\\right) - \\beta \\log \\pi_ {\\text {r e f}} \\left(a _ {0} \\mid s _ {0}\\right) + \\sum_ {t = 1} ^ {T - 1} \\left(Q ^ {*} \\left(s _ {t}, a _ {t}\\right) - V ^ {*} \\left(s _ {t}\\right) - \\beta \\log \\pi_ {\\text {r e f}} \\left(a _ {t} \\mid s _ {t}\\right)\\right) (14) \\\\ = Q ^ {*} \\left(s _ {0}, a _ {0}\\right) - \\beta \\log \\pi_ {\\text {r e f}} \\left(a _ {0} \\mid s _ {0}\\right) + \\sum_ {t = 1} ^ {T - 1} \\beta \\log \\frac {\\pi^ {*} \\left(a _ {t} \\mid s _ {t}\\right)}{\\pi_ {\\text {r e f}} \\left(a _ {t} \\mid s _ {t}\\right)} (15) \\\\ = V ^ {*} \\left(s _ {0}\\right) + \\sum_ {t = 0} ^ {T - 1} \\beta \\log \\frac {\\pi^ {*} \\left(a _ {t} \\mid s _ {t}\\right)}{\\pi_ {\\mathrm {r e f}} \\left(a _ {t} \\mid s _ {t}\\right)} (16) \\\\ \\end{array}", + "image_path": "dafe17492aaac559e81ed6577f34a95740535fa5a04fb5593a06b87fbfd4838d.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 705, + 525, + 732 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 705, + 525, + 732 + ], + "spans": [ + { + "bbox": [ + 67, + 705, + 525, + 732 + ], + "type": "text", + "content": "By substituting the above result into Eq. 
11, we can eliminate " + }, + { + "bbox": [ + 67, + 705, + 525, + 732 + ], + "type": "inline_equation", + "content": "V^{*}(S_{0})" + }, + { + "bbox": [ + 67, + 705, + 525, + 732 + ], + "type": "text", + "content": " in the same way as removing the partition function in DPO, obtaining the Token-level BT model that conforms to the MDP:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 127, + 741, + 525, + 778 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 127, + 741, + 525, + 778 + ], + "spans": [ + { + "bbox": [ + 127, + 741, + 525, + 778 + ], + "type": "interline_equation", + "content": "p _ {\\pi^ {*}} \\left(\\tau^ {w} \\succeq \\tau^ {l}\\right) = \\sigma \\left(\\sum_ {t = 0} ^ {N - 1} \\beta \\log \\frac {\\pi^ {*} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}{\\pi_ {\\text {r e f}} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)} - \\sum_ {t = 0} ^ {M - 1} \\beta \\log \\frac {\\pi^ {*} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}{\\pi_ {\\text {r e f}} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}\\right) \\tag {17}", + "image_path": "ad09cccc29d4b78c3572b2dd4480e44652adfaf06e1a16f38d36321e49b92f2c.jpg" + } + ] + } + ], + "index": 16 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "8175" + } + ] + } + ], + "index": 17 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 11 + }, + { + "para_blocks": [ + { + "bbox": [ + 78, + 71, + 332, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 71, + 332, + 84 + ], + "spans": [ + { + "bbox": [ + 78, + 71, + 332, + 84 + ], + "type": "text", + "content": "Thus, the Loss formulation of DPO at the Token level is:" + } + ] + } + 
], + "index": 0 + }, + { + "bbox": [ + 80, + 91, + 526, + 139 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 80, + 91, + 526, + 139 + ], + "spans": [ + { + "bbox": [ + 80, + 91, + 526, + 139 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(\\pi_ {\\theta}, \\mathcal {D}\\right) = - \\mathbb {E} _ {\\left(\\tau_ {w}, \\tau_ {l}\\right) \\sim \\mathcal {D}} \\left[ \\log \\sigma \\left(\\left(\\sum_ {t = 0} ^ {N - 1} \\beta \\log \\frac {\\pi^ {*} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}{\\pi_ {\\text {r e f}} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}\\right) - \\left(\\sum_ {t = 0} ^ {M - 1} \\beta \\log \\frac {\\pi^ {*} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}{\\pi_ {\\text {r e f}} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}\\right)\\right) \\right] \\tag {18}", + "image_path": "e28515335a8df0b1ca8e22890fbe6284300166b55e91078c215d2e290ccef4cb.jpg" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 148, + 340, + 161 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 148, + 340, + 161 + ], + "spans": [ + { + "bbox": [ + 68, + 148, + 340, + 161 + ], + "type": "text", + "content": "A.3 The Token-level optimal advantage function of DPO" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 67, + 165, + 515, + 179 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 165, + 515, + 179 + ], + "spans": [ + { + "bbox": [ + 67, + 165, + 515, + 179 + ], + "type": "text", + "content": "By log-linearizing the fixed point solution of the optimal policy at the token level (Eq. 
10), we obtain:" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 207, + 188, + 525, + 204 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 207, + 188, + 525, + 204 + ], + "spans": [ + { + "bbox": [ + 207, + 188, + 525, + 204 + ], + "type": "interline_equation", + "content": "\\beta \\log \\pi^ {*} (\\mathbf {a} _ {t} \\mid \\mathbf {s} _ {t}) = Q ^ {*} (\\mathbf {s} _ {t}, \\mathbf {a} _ {t}) - V ^ {*} (\\mathbf {s} _ {t}) \\tag {19}", + "image_path": "720a7f14212de80911d37edee5ddbce6d0daf2518e804f5afab0f7a85a3299cd.jpg" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 212, + 202, + 226 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 212, + 202, + 226 + ], + "spans": [ + { + "bbox": [ + 67, + 212, + 202, + 226 + ], + "type": "text", + "content": "Then, combining with Eq. 24:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 179, + 233, + 525, + 262 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 179, + 233, + 525, + 262 + ], + "spans": [ + { + "bbox": [ + 179, + 233, + 525, + 262 + ], + "type": "interline_equation", + "content": "\\beta \\log \\frac {\\pi^ {*} (\\mathbf {a} _ {t} \\mid \\mathbf {s} _ {t})}{\\pi_ {\\text {r e f}} (\\mathbf {a} _ {t} \\mid \\mathbf {s} _ {t})} = r (\\mathbf {s} _ {t}, \\mathbf {a} _ {t}) + V ^ {*} (\\mathbf {s} _ {t + 1}) - V ^ {*} (\\mathbf {s} _ {t}). 
\\tag {20}", + "image_path": "3867f81ea4e34b2ca14cc3e9de95b5a5313508b0a99d34abd3ef05420b3814ab.jpg" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 271, + 524, + 300 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 271, + 524, + 300 + ], + "spans": [ + { + "bbox": [ + 67, + 271, + 524, + 300 + ], + "type": "text", + "content": "Thus, we can establish the relationship between " + }, + { + "bbox": [ + 67, + 271, + 524, + 300 + ], + "type": "inline_equation", + "content": "\\beta \\log \\frac{\\pi^{*}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}{\\pi_{\\mathrm{ref}}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}" + }, + { + "bbox": [ + 67, + 271, + 524, + 300 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 271, + 524, + 300 + ], + "type": "inline_equation", + "content": "r(\\mathbf{s}_t,\\mathbf{a}_t)" + }, + { + "bbox": [ + 67, + 271, + 524, + 300 + ], + "type": "text", + "content": ". and according to Rafailov et al. 2023's definition:" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "spans": [ + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "text", + "content": "Definition 1 Two reward functions " + }, + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "inline_equation", + "content": "r(\\mathbf{s}_t, \\mathbf{a}_t)" + }, + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "inline_equation", + "content": "r'(\\mathbf{s}_t, \\mathbf{a}_t)" + }, + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "text", + "content": " are equivalent if there exists a potential function " + }, + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "inline_equation", + "content": "\\Phi(\\mathbf{s})" + }, + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "text", + "content": ", such that " + }, + { + 
"bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "inline_equation", + "content": "r'(\\mathbf{s}_t, \\mathbf{a}_t) = r(\\mathbf{s}_t, \\mathbf{a}_t) + \\Phi(\\mathbf{s}_{t+1}) - \\Phi(\\mathbf{s}_t)" + }, + { + "bbox": [ + 67, + 304, + 525, + 332 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 333, + 381, + 354 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 333, + 381, + 354 + ], + "spans": [ + { + "bbox": [ + 67, + 333, + 381, + 354 + ], + "type": "text", + "content": "We can conclude that the optimal advantage function is " + }, + { + "bbox": [ + 67, + 333, + 381, + 354 + ], + "type": "inline_equation", + "content": "\\beta \\log \\frac{\\pi^{*}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}{\\pi_{\\mathrm{ref}}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}" + }, + { + "bbox": [ + 67, + 333, + 381, + 354 + ], + "type": "text", + "content": "." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 67, + 358, + 439, + 371 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 358, + 439, + 371 + ], + "spans": [ + { + "bbox": [ + 67, + 358, + 439, + 371 + ], + "type": "text", + "content": "A.4 Proving that the " + }, + { + "bbox": [ + 67, + 358, + 439, + 371 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 358, + 439, + 371 + ], + "type": "text", + "content": " of DPO can vary across tokens in the token-level MDP" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 375, + 409, + 388 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 375, + 409, + 388 + ], + "spans": [ + { + "bbox": [ + 67, + 375, + 409, + 388 + ], + "type": "text", + "content": "When " + }, + { + "bbox": [ + 67, + 375, + 409, + 388 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 375, + 409, + 388 + ], + "type": "text", + "content": " is considered as a variable dependent on " + }, + { + "bbox": [ + 67, + 375, + 409, + 388 + 
], + "type": "inline_equation", + "content": "t" + }, + { + "bbox": [ + 67, + 375, + 409, + 388 + ], + "type": "text", + "content": ", Eq. 9 is transformed into:" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 138, + 397, + 525, + 433 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 138, + 397, + 525, + 433 + ], + "spans": [ + { + "bbox": [ + 138, + 397, + 525, + 433 + ], + "type": "interline_equation", + "content": "\\max _ {\\pi_ {\\theta}} \\mathbb {E} _ {a _ {t} \\sim \\pi_ {\\theta} (\\cdot | \\mathbf {s} _ {t})} \\sum_ {t = 0} ^ {T} \\left[ \\left(r \\left(\\mathbf {s} _ {t}, \\mathbf {a} _ {t}\\right) + \\beta_ {t} \\log \\pi_ {r e f} \\left(\\mathbf {a} _ {t} \\mid \\mathbf {s} _ {t}\\right)\\right) - \\beta_ {t} \\log \\pi_ {\\theta} \\left(\\mathbf {a} _ {t} \\mid \\mathbf {s} _ {t}\\right) \\right] \\tag {21}", + "image_path": "ff223ee3a0c2800af9a3cbd7a8ba6a433060f58580be54b423710801d9508b14.jpg" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 441, + 525, + 481 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 441, + 525, + 481 + ], + "spans": [ + { + "bbox": [ + 67, + 441, + 525, + 481 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 441, + 525, + 481 + ], + "type": "inline_equation", + "content": "\\beta_{t}" + }, + { + "bbox": [ + 67, + 441, + 525, + 481 + ], + "type": "text", + "content": " depends solely on " + }, + { + "bbox": [ + 67, + 441, + 525, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{a}_t" + }, + { + "bbox": [ + 67, + 441, + 525, + 481 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 441, + 525, + 481 + ], + "type": "inline_equation", + "content": "\\mathbf{s}_t" + }, + { + "bbox": [ + 67, + 441, + 525, + 481 + ], + "type": "text", + "content": ". 
Then, according to Maximum Entropy Reinforcement Learning with Fixed Dynamics (Levine, 2018), the above formula can be rewritten in a form that includes the KL divergence:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 89, + 489, + 525, + 518 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 89, + 489, + 525, + 518 + ], + "spans": [ + { + "bbox": [ + 89, + 489, + 525, + 518 + ], + "type": "interline_equation", + "content": "= \\mathbb {E} _ {\\mathbf {s} _ {t}} \\left[ \\right. - \\beta_ {t} D _ {K L} \\left( \\right.\\pi_ {\\theta} (\\mathbf {a} _ {t} | \\mathbf {s} _ {t}) \\left\\| \\right. \\frac {1}{\\exp (V (\\mathbf {s} _ {t}))} \\exp \\left(\\frac {r (\\mathbf {s} _ {t} , \\mathbf {a} _ {t}) + \\beta_ {t} \\log \\pi_ {r e f} (\\mathbf {a} _ {t} | \\mathbf {s} _ {t})}{\\beta_ {t}}\\right)\\left. \\right) + V (\\mathbf {s} _ {t}) \\left. \\right] \\tag {22}", + "image_path": "69d8add688d2b9d88fbdef866c24c0684523b76278ca1d171b43a2054e267fc0.jpg" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 528, + 525, + 568 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 528, + 525, + 568 + ], + "spans": [ + { + "bbox": [ + 67, + 528, + 525, + 568 + ], + "type": "text", + "content": "where " + }, + { + "bbox": [ + 67, + 528, + 525, + 568 + ], + "type": "inline_equation", + "content": "V(\\mathbf{s}_t) = \\beta_t\\log \\int_{\\mathcal{A}}[\\exp \\frac{r(\\mathbf{s}_t,\\mathbf{a}_t)}{\\beta_t}\\pi_{ref}(\\mathbf{a}_t|\\mathbf{s}_t)]d\\mathbf{a}_t" + }, + { + "bbox": [ + 67, + 528, + 525, + 568 + ], + "type": "text", + "content": ". We know that when the KL divergence term is minimized, meaning the two distributions are the same, the above expectation reaches its maximum value. 
That is:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 157, + 575, + 526, + 605 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 157, + 575, + 526, + 605 + ], + "spans": [ + { + "bbox": [ + 157, + 575, + 526, + 605 + ], + "type": "interline_equation", + "content": "\\pi_ {\\theta} (\\mathbf {a} _ {t} | \\mathbf {s} _ {t}) = \\frac {1}{\\exp (V (\\mathbf {s} _ {t}))} \\exp \\left(\\frac {r (\\mathbf {s} _ {t} , \\mathbf {a} _ {t}) + \\beta_ {t} \\log \\pi_ {r e f} (\\mathbf {a} _ {t} | \\mathbf {s} _ {t})}{\\beta_ {t}}\\right) \\tag {23}", + "image_path": "e32167172e001b51db4fdebc66fb51c42938a06976c7be718be81450d6f656ce.jpg" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 612, + 199, + 624 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 612, + 199, + 624 + ], + "spans": [ + { + "bbox": [ + 67, + 612, + 199, + 624 + ], + "type": "text", + "content": "Based on this, we define that:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 114, + 634, + 525, + 669 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 114, + 634, + 525, + 669 + ], + "spans": [ + { + "bbox": [ + 114, + 634, + 525, + 669 + ], + "type": "interline_equation", + "content": "Q ^ {*} \\left(s _ {t}, a _ {t}\\right) = \\left\\{ \\begin{array}{l l} r \\left(s _ {t}, a _ {t}\\right) + \\beta_ {t} \\log \\pi_ {r e f} \\left(a _ {t} \\mid s _ {t}\\right) + V ^ {*} \\left(s _ {t + 1}\\right), & \\text {i f} s _ {t + 1} \\text {i s n o t t e r m i n a l} \\\\ r \\left(s _ {t}, a _ {t}\\right) + \\beta_ {t} \\log \\pi_ {r e f} \\left(a _ {t} \\mid s _ {t}\\right), & \\text {i f} s _ {t + 1} \\text {i s t e r m i n a l} \\end{array} \\right. 
\\tag {24}", + "image_path": "72110d0a41b76b244d808a0549d2cacc0ba2254103b5a591e8c26bdf2a497c7d.jpg" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 67, + 673, + 311, + 687 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 673, + 311, + 687 + ], + "spans": [ + { + "bbox": [ + 67, + 673, + 311, + 687 + ], + "type": "text", + "content": "Thus we can obtain the solution for the optimal policy:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 226, + 693, + 525, + 710 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 226, + 693, + 525, + 710 + ], + "spans": [ + { + "bbox": [ + 226, + 693, + 525, + 710 + ], + "type": "interline_equation", + "content": "\\pi_ {\\theta} \\left(\\mathbf {a} _ {t} \\mid \\mathbf {s} _ {t}\\right) = e ^ {\\left(Q \\left(\\mathbf {s} _ {t}, \\mathbf {a} _ {t}\\right) - V \\left(\\mathbf {s} _ {t}\\right)\\right) / \\beta_ {t}} \\tag {25}", + "image_path": "f8233282026b2297a6dee19be22b29a697584f1d7af39ea2a39bd7fdab9490b1.jpg" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 67, + 719, + 525, + 759 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 719, + 525, + 759 + ], + "spans": [ + { + "bbox": [ + 67, + 719, + 525, + 759 + ], + "type": "text", + "content": "Thus, based on the fixed point solution with a varying " + }, + { + "bbox": [ + 67, + 719, + 525, + 759 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 719, + 525, + 759 + ], + "type": "text", + "content": " in Eq. 25, we can continue the derivation in section A.2 to obtain the token-level MDP of DPO with vary " + }, + { + "bbox": [ + 67, + 719, + 525, + 759 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 719, + 525, + 759 + ], + "type": "text", + "content": " values for different tokens and perform a similar derivation as in Appendix A.3." 
+ } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 78, + 759, + 500, + 777 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 78, + 759, + 500, + 777 + ], + "spans": [ + { + "bbox": [ + 78, + 759, + 500, + 777 + ], + "type": "text", + "content": "Finally, it can be concluded that " + }, + { + "bbox": [ + 78, + 759, + 500, + 777 + ], + "type": "inline_equation", + "content": "\\beta_{t}\\log \\frac{\\pi^{*}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}{\\pi_{\\mathrm{ref}}(\\mathbf{a}_{t}|\\mathbf{s}_{t})}" + }, + { + "bbox": [ + 78, + 759, + 500, + 777 + ], + "type": "text", + "content": " can serve as the token-level advantage function." + } + ] + } + ], + "index": 22 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 310, + 791 + ], + "type": "text", + "content": "8176" + } + ] + } + ], + "index": 23 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 12 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 183, + 84 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 183, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 183, + 84 + ], + "type": "text", + "content": "A.5 Gradient Analysis" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 89, + 515, + 103 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 89, + 515, + 103 + ], + "spans": [ + { + "bbox": [ + 67, + 89, + 515, + 103 + ], + "type": "text", + "content": "Here's the gradient analysis of token-level DPO (Eq. 5) incorporating fine-grained signals. 
We define:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 162, + 115, + 525, + 152 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 162, + 115, + 525, + 152 + ], + "spans": [ + { + "bbox": [ + 162, + 115, + 525, + 152 + ], + "type": "interline_equation", + "content": "R _ {w, k} = \\beta \\sum_ {t = n _ {k}} ^ {n _ {k} + l _ {k}} r _ {w, k} \\log \\frac {\\pi^ {*} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}{\\pi_ {r e f} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}, \\quad R _ {w} = \\sum_ {k = 0} ^ {S _ {w} - 1} R _ {w, k} \\tag {26}", + "image_path": "03faf8481eafc011406620f763903339bae156d866db30453c6ff37eb58a87fe.jpg" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 163, + 154, + 525, + 192 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 163, + 154, + 525, + 192 + ], + "spans": [ + { + "bbox": [ + 163, + 154, + 525, + 192 + ], + "type": "interline_equation", + "content": "R _ {l, k} = \\beta \\sum_ {t = n _ {k}} ^ {n _ {k} + l _ {k}} r _ {l, k} \\log \\frac {\\pi^ {*} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}{\\pi_ {r e f} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}, \\quad R _ {l} = \\sum_ {k = 0} ^ {S _ {l} - 1} R _ {l, k}. \\tag {27}", + "image_path": "49e6010670a3a5790d0759714dc08348c87c6539f5e6b05be942273d0ab60743.jpg" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 67, + 203, + 232, + 215 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 203, + 232, + 215 + ], + "spans": [ + { + "bbox": [ + 67, + 203, + 232, + 215 + ], + "type": "text", + "content": "Then, Eq. 
5 can be transformed into:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 201, + 228, + 525, + 244 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 201, + 228, + 525, + 244 + ], + "spans": [ + { + "bbox": [ + 201, + 228, + 525, + 244 + ], + "type": "interline_equation", + "content": "\\mathcal {L} \\left(\\pi_ {\\theta}, D\\right) = - \\mathbb {E} _ {\\left(\\tau_ {w}, \\tau_ {l}\\right) \\sim D} \\log \\sigma \\left(R _ {w} - R _ {l}\\right). \\tag {28}", + "image_path": "ec65bb581e8298ca36c410cd8d725ec3549aa6ddd847f8be6837a02e2e63b413.jpg" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 255, + 239, + 268 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 255, + 239, + 268 + ], + "spans": [ + { + "bbox": [ + 67, + 255, + 239, + 268 + ], + "type": "text", + "content": "Then, differentiate the above equation:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 154, + 280, + 525, + 296 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 154, + 280, + 525, + 296 + ], + "spans": [ + { + "bbox": [ + 154, + 280, + 525, + 296 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} \\mathcal {L} (\\pi_ {\\theta}, D) = - \\mathbb {E} _ {(\\tau_ {w}, \\tau_ {l}) \\sim D} [ \\sigma (R _ {l} - R _ {w}) \\cdot (\\nabla_ {\\theta} R _ {w} - \\nabla_ {\\theta} R _ {l}) ]. 
\\tag {29}", + "image_path": "80ba0807c0d86e4c9f9dbca4209e4701000d7f4c72bc419f8e8a72abe49cc3df.jpg" + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 308, + 240, + 321 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 308, + 240, + 321 + ], + "spans": [ + { + "bbox": [ + 67, + 308, + 240, + 321 + ], + "type": "text", + "content": "Expanding the above equation, we get:" + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 79, + 332, + 525, + 382 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 332, + 525, + 382 + ], + "spans": [ + { + "bbox": [ + 79, + 332, + 525, + 382 + ], + "type": "interline_equation", + "content": "\\nabla_ {\\theta} \\mathcal {L} \\left(\\pi_ {\\theta}, D\\right) = - \\mathbb {E} _ {\\left(\\tau_ {w}, \\tau_ {l}\\right) \\sim D} [ \\beta \\cdot \\sigma \\left(\\left(\\sum_ {t = n _ {k}} ^ {n _ {k} + l _ {k}} r _ {w, k} \\log \\frac {\\pi^ {*} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}{\\pi_ {r e f} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right)}\\right) - \\left(\\sum_ {t = n _ {k}} ^ {n _ {k} + l _ {k}} r _ {l, k} \\log \\frac {\\pi^ {*} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}{\\pi_ {r e f} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)}\\right)\\right) \\tag {30}", + "image_path": "79513410466641b1df20eabfdca574d9f68e6307057fbe73c8aef7196c206572.jpg" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 387, + 525, + 424 + ], + "type": "interline_equation", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 387, + 525, + 424 + ], + "spans": [ + { + "bbox": [ + 83, + 387, + 525, + 424 + ], + "type": "interline_equation", + "content": "\\cdot \\left(\\sum_ {t = n _ {k}} ^ {n _ {k} + l _ {k}} r _ {w, k} \\nabla_ {\\theta} \\log \\pi_ {\\theta} ^ {*} \\left(\\mathbf {a} _ {t} ^ {w} \\mid \\mathbf {s} _ {t} ^ {w}\\right) - \\left(\\sum_ {t = n _ {k}} ^ {n _ {k} + 
l _ {k}} r _ {l, k} \\nabla_ {\\theta} \\log \\pi_ {\\theta} ^ {*} \\left(\\mathbf {a} _ {t} ^ {l} \\mid \\mathbf {s} _ {t} ^ {l}\\right)\\right) \\right]. \\tag {31}", + "image_path": "282a9a384e0cba16d38ae4363f2e3a715cd0dc840e45ade00a7abaf70cef7ee0.jpg" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 67, + 434, + 525, + 543 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 434, + 525, + 543 + ], + "spans": [ + { + "bbox": [ + 67, + 434, + 525, + 543 + ], + "type": "text", + "content": "We can see that the gradient difference between the chosen and rejected segments is entirely determined by " + }, + { + "bbox": [ + 67, + 434, + 525, + 543 + ], + "type": "inline_equation", + "content": "r_{w,k}" + }, + { + "bbox": [ + 67, + 434, + 525, + 543 + ], + "type": "text", + "content": " and " + }, + { + "bbox": [ + 67, + 434, + 525, + 543 + ], + "type": "inline_equation", + "content": "r_{l,k}" + }, + { + "bbox": [ + 67, + 434, + 525, + 543 + ], + "type": "text", + "content": ". Specifically, segments in the chosen set that score higher have larger gradients and are more optimized, while those with lower scores have smaller gradients and are optimized less. The same applies to the rejected response. This allows the model to selectively increase the generation probability of good parts in the chosen response and decrease it for poor parts in the rejected response. Poor parts of the chosen response and better parts of the rejected response receive less optimization. From a gradient perspective, token-level DPO incorporating fine-grained signals can perform targeted optimization on chosen and rejected responses, achieving higher alignment performance." 
+ } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 555, + 211, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 555, + 211, + 567 + ], + "spans": [ + { + "bbox": [ + 67, + 555, + 211, + 567 + ], + "type": "text", + "content": "B Implementation Details" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 67, + 577, + 184, + 591 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 577, + 184, + 591 + ], + "spans": [ + { + "bbox": [ + 67, + 577, + 184, + 591 + ], + "type": "text", + "content": "B.1 Hyper-parameters" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "spans": [ + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "content": "For all the compared methods, we set " + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "content": " to 0.2, and the final loss includes " + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "inline_equation", + "content": "0.1\\mathrm{x}" + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "content": " of the SFT loss except for ORPO. 
To ensure a fair comparison, in our method, the " + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "inline_equation", + "content": "\\beta" + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "content": " is adaptively adjusted during training by calculating the average score " + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "inline_equation", + "content": "r" + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "content": " of all segments within a batch to achieve equivalence with " + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "inline_equation", + "content": "\\beta = 0.2" + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "content": ". For the specific methods: The " + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "inline_equation", + "content": "\\gamma" + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "content": " of SimPO is set to 0.5. In TDPO, we use TDPO" + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "inline_equation", + "content": "_2" + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "content": " with " + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "inline_equation", + "content": "\\alpha" + }, + { + "bbox": [ + 67, + 596, + 525, + 649 + ], + "type": "text", + "content": " set to 0.5." + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 67, + 651, + 525, + 705 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 651, + 525, + 705 + ], + "spans": [ + { + "bbox": [ + 67, + 651, + 525, + 705 + ], + "type": "text", + "content": "For the 2D-DPO's weights " + }, + { + "bbox": [ + 67, + 651, + 525, + 705 + ], + "type": "inline_equation", + "content": "\\mathbf{W}" + }, + { + "bbox": [ + 67, + 651, + 525, + 705 + ], + "type": "text", + "content": ", we follow Wang et al. 
2024b and use a heuristic search, setting the weights for the five aspects Helpfulness, Correctness, Safety, Completeness, Clarity to [0.3, 0.4, 0.1, 0.1, 0.1]. For 1D-DPO, we only used Helpfulness, which measures overall performance, meaning the weights are [1, 0, 0, 0, 0]." + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 715, + 167, + 729 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 715, + 167, + 729 + ], + "spans": [ + { + "bbox": [ + 67, + 715, + 167, + 729 + ], + "type": "text", + "content": "B.2 Training Setup" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 67, + 735, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 735, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 735, + 525, + 775 + ], + "type": "text", + "content": "We trained all models on 8 A100-80GB SXM GPUs. The per_device_train_batch_size was set to 1, gradient Accumulation_steps to 8, and we used bfloat16 precision. The initial learning rate was set to 1e-7 with cosine decay. Each method was trained for 700 steps." 
+ } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "8177" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 13 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 151, + 83 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 151, + 83 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 151, + 83 + ], + "type": "text", + "content": "B.3 Core Codes" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 67, + 89, + 246, + 100 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 89, + 246, + 100 + ], + "spans": [ + { + "bbox": [ + 67, + 89, + 246, + 100 + ], + "type": "text", + "content": "The core code of 2D-DPO is as follows:" + } + ] + } + ], + "index": 1 + }, + { + "type": "code", + "bbox": [ + 69, + 106, + 525, + 774 + ], + "blocks": [ + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "lines": [ + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "spans": [ + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": "def 2D_DPO_loss( \nself, \npolicy_chosen_logps: \"torch.Tensor\", \npolicy_rejected_logps: \"torch.Tensor\", \nreference_chosen_logps: \"torch.Tensor\", \nreference_rejected_logps: \"torch.Tensor\", \nchosen Scores: \"torch.Tensor\", \nrejected Scores: \"torch.Tensor\" ) -> Tuple[\"torch.Tensor\",\"torch.Tensor\",\"torch.Tensor']: \nchosen_rewards " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " policy_chosen_logps.to(self.acceleratordevice)- \nreference_chosen_logps.to(self.acceleratordevice) \nrejected_rewards " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + 
"type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " policy_rejected_logps.to(self.acceleratordevice)- \nreference_rejected_logps.to(self.acceleratordevice) \nchosen_score " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " chosen_score[:,::-1] /4+1 \nrejected_score " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " rejected_score[:,::-1] /4+1 \ndef get_chunked_idxs(scores): \nchunked_idx " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " [] for idx in range(scores.shape[-1]): if idx " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "= = 0" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " pre_score " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " scores[:idx] else: if (scores[:idx] != pre_score).any(): chunked_idx.append(idx) pre_score " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " scores[:idx] chunked_idx.append(scores.shape[-1]) return chunked_idx \ndef compute_que_beta(beta, chosen_score, rejected_score, min_chunk_num): equ_beta_chosen " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " beta / 
(chosen_score-sum(-1)/min_chunk_num) equ_beta_rejected " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " beta / (rejected_score-sum(-1)/min_chunk_num) return equ_beta_chosen, equ_beta_rejected \nbs " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "=" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " chosen_score.shape[0] \nlosses " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "= 0" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " \nfor idx in range(bs): chosen_chunked_idx " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " get_chunked_idxs(chosen_score[sidx]) rejected_chunked_idx " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " get_chunked_idxs(rejected_score[sidx]) chosen_total Scores " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " [] rejected_total Scores " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " [] for i in range(len(chosen_chunked_idx)-1): chosen_score_g " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " (self.W * chosen_score[sidx,:chosen_chunked_idx[i]]).sum(-1) 
chosen_total Scores.append(chosen_score_g) for i in range(len(rejected_chunked_idx)-1): rejected_score_g " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " (self.W * rejected_score[sidx,:), rejected_chunked_idx[i]).sum(-1) rejected_total Scores.append(rejected_score_g) if len(chosen_total Scores " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "= = 0" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " or len(rejected_total Scores " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "= = 0" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " .. continue chosen_total Scores " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " torch.stack(chosen_total Scores) rejected_total Scores " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " torch.stack(rejected_total Scores) min_chunk_num " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " min(len(chosen_chunked_idx)-1,len(rejected_chunked_idx)-1) top_chosen Indices " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " torch.argsort(chosen_total Scores, dim=0,descending= True)[:min_chunk_num] top_rejected Indices " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": 
"inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " torch.argsort(rejected_total Scores, dim=0)[: min_chunk_num] top_chosen Scores " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " chosen_total Scores [top_chosen Indices] top_rejected Scores " + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "inline_equation", + "content": "\\equiv" + }, + { + "bbox": [ + 69, + 106, + 525, + 774 + ], + "type": "text", + "content": " rejected_total Scores [top_rejected Indices]" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "code_body" + } + ], + "index": 2, + "sub_type": "code", + "guess_lang": "python" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "8178" + } + ] + } + ], + "index": 3 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 14 + }, + { + "para_blocks": [ + { + "type": "code", + "bbox": [ + 88, + 72, + 483, + 252 + ], + "blocks": [ + { + "bbox": [ + 88, + 72, + 483, + 252 + ], + "lines": [ + { + "bbox": [ + 88, + 72, + 483, + 252 + ], + "spans": [ + { + "bbox": [ + 88, + 72, + 483, + 252 + ], + "type": "text", + "content": "equu_beta_chosen, equu_beta_rejected = compute_que_beta(self.beta, top_chosen Scores, top_rejected Scores, min_chunk_num) \nfor i in range(min_chunk_num): \n chosen_chunk_idx = top_chosen Indices[i].item() \n rejected_chunk_idx = top_rejected Indices[i].item() \n chosen_rewards_g = chosen_rewards[ idx, chosen_chunked_idx] \n chosen_chunk_idx]: chosen_chunked_idx[chosen_chunk_idx + 1].sum(-1) \n chosenScores_g = chosen_total Scores[chosen_chunk_idx] \n rejected_rewards_g = rejected_rewards[ 
idx, rejected_chunked_idx] \n rejected_chunk_idx]: rejected_chunked_idx[rejected_chunk_idx + 1].sum(-1) \n rejectedScores_g = rejected_total Scores[rejected_chunk_idx] \n logits = equa_beta_chosen * chosen Scores_g * chosen_rewards_g - equa_beta_rejected * rejectedScores_g * rejected_rewards_g \n losses += -F.logsigmoid(logits) \nlosses = losses / bs \nreturn losses" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "code_body" + } + ], + "index": 0, + "sub_type": "code", + "guess_lang": "python" + }, + { + "bbox": [ + 67, + 282, + 325, + 296 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 282, + 325, + 296 + ], + "spans": [ + { + "bbox": [ + 67, + 282, + 325, + 296 + ], + "type": "text", + "content": "C HelpSteer-2D Data Distribution and Statistics" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 68, + 307, + 176, + 320 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 307, + 176, + 320 + ], + "spans": [ + { + "bbox": [ + 68, + 307, + 176, + 320 + ], + "type": "text", + "content": "C.1 Data Annotation" + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 77, + 346, + 470, + 359 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 346, + 470, + 359 + ], + "spans": [ + { + "bbox": [ + 77, + 346, + 470, + 359 + ], + "type": "text", + "content": "Question: How can I customize it to send my jump count to other players that also have the Weak Aura" + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 77, + 368, + 520, + 414 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 77, + 368, + 520, + 414 + ], + "spans": [ + { + "bbox": [ + 77, + 368, + 520, + 414 + ], + "type": "text", + "content": "Response: 14 To customize the code to send your jump count to other players that also have the Weak Aura, you will need to modify the code to store the jump count in a shared data store. 
# segment 1 One option could be to use a database or a server-side scripting language to store and manage the data. # segment 2 Here is one example of how you could modify the code to send the jump count to other players: # segment 3" + } + ] + } + ], + "index": 4 + }, + { + "type": "code", + "bbox": [ + 78, + 424, + 497, + 460 + ], + "blocks": [ + { + "bbox": [ + 78, + 424, + 497, + 460 + ], + "lines": [ + { + "bbox": [ + 78, + 424, + 497, + 460 + ], + "spans": [ + { + "bbox": [ + 78, + 424, + 497, + 460 + ], + "type": "text", + "content": "1. First, update the onJump function to store the jump count in a variable that is accessible to other players: # segment 4 \n - var jumpCount = 0; function onJump() { jumpCount++; } *** # segment 5" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "code_body" + } + ], + "index": 5, + "sub_type": "code", + "guess_lang": "javascript" + }, + { + "type": "code", + "bbox": [ + 77, + 470, + 521, + 561 + ], + "blocks": [ + { + "bbox": [ + 77, + 470, + 521, + 561 + ], + "lines": [ + { + "bbox": [ + 77, + 470, + 521, + 561 + ], + "spans": [ + { + "bbox": [ + 77, + 470, + 521, + 561 + ], + "type": "text", + "content": "2. 
Next, create a server-side script that can retrieve the jump count from the database or other data store: # segment 6\n```\nAssuming you are using Node.js or another server-side language\nvar http = require('http');\nvar queryString = require('query-string');\n# segment 7\nConnect to the database\nvar db = new mysql('localhost', 'username', 'password', 'database_name');\n# segment 8\nRetrieve the jump count from the database\nhttp.get('http://yourdomain.com/getJumpCount', function (response, body) { var params = \n queryString.count(body); var jumpCount = params.count; # segment 9" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "code_body" + } + ], + "index": 6, + "sub_type": "code", + "guess_lang": "javascript" + }, + { + "type": "code", + "bbox": [ + 77, + 571, + 515, + 606 + ], + "blocks": [ + { + "bbox": [ + 77, + 571, + 515, + 606 + ], + "lines": [ + { + "bbox": [ + 77, + 571, + 515, + 606 + ], + "spans": [ + { + "bbox": [ + 77, + 571, + 515, + 606 + ], + "type": "text", + "content": "Update the jump count in the database or other data store db.query(\"UPDATE players SET jumpCount = jumpCount + \") + jumpCount + \" WHERE username = 'yourusername' \", function (error, results, fields) { if (error) throw error; }); }); #segment 10" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "code_body" + } + ], + "index": 7, + "sub_type": "code", + "guess_lang": "javascript" + }, + { + "bbox": [ + 79, + 616, + 498, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 79, + 616, + 498, + 629 + ], + "spans": [ + { + "bbox": [ + 79, + 616, + 498, + 629 + ], + "type": "text", + "content": "3. 
Finally, modify the HTML to include a form that allows players to submit their jump count: #segement 11" + } + ] + } + ], + "index": 8 + }, + { + "type": "code", + "bbox": [ + 77, + 638, + 514, + 672 + ], + "blocks": [ + { + "bbox": [ + 77, + 638, + 514, + 672 + ], + "lines": [ + { + "bbox": [ + 77, + 638, + 514, + 672 + ], + "spans": [ + { + "bbox": [ + 77, + 638, + 514, + 672 + ], + "type": "text", + "content": "```\n
\n \n \n \n
\n }\n # segment 12" + } + ] + } + ], + "index": 9, + "angle": 0, + "type": "code_body" + } + ], + "index": 9, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 76, + 683, + 502, + 707 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 76, + 683, + 502, + 707 + ], + "spans": [ + { + "bbox": [ + 76, + 683, + 502, + 707 + ], + "type": "text", + "content": "This code uses a server-side script to retrieve the jump count from the database or other data store and update the jump count for the player with the Weak Aura. #segment 13" + } + ] + } + ], + "index": 10 + }, + { + "type": "code", + "bbox": [ + 77, + 717, + 496, + 740 + ], + "blocks": [ + { + "bbox": [ + 77, + 717, + 496, + 740 + ], + "lines": [ + { + "bbox": [ + 77, + 717, + 496, + 740 + ], + "spans": [ + { + "bbox": [ + 77, + 717, + 496, + 740 + ], + "type": "text", + "content": "You can modify the database query to update the jump count for specific players or to store the jump count in a different data store. #segment 14" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "code_body" + } + ], + "index": 11, + "sub_type": "code", + "guess_lang": "txt" + }, + { + "bbox": [ + 183, + 759, + 408, + 772 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 183, + 759, + 408, + 772 + ], + "spans": [ + { + "bbox": [ + 183, + 759, + 408, + 772 + ], + "type": "text", + "content": "Figure 7: An example of splitting the model's response." 
+ } + ] + } + ], + "index": 12, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "8179" + } + ] + } + ], + "index": 13 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 15 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 94, + 68, + 503, + 100 + ], + "blocks": [ + { + "bbox": [ + 94, + 68, + 503, + 100 + ], + "lines": [ + { + "bbox": [ + 94, + 68, + 503, + 100 + ], + "spans": [ + { + "bbox": [ + 94, + 68, + 503, + 100 + ], + "type": "table", + "html": "
-Helpfulness(%)Correctness(%)Safety(%)Completeness(%)Clarity(%)
Accuracy87.394.999.484.791.1
", + "image_path": "6f1d99eeeb530d4c9d273f06aebcac2481c7085a3dd8acef86fd591f7fa29c58.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 94, + 133, + 503, + 165 + ], + "blocks": [ + { + "bbox": [ + 166, + 111, + 425, + 124 + ], + "lines": [ + { + "bbox": [ + 166, + 111, + 425, + 124 + ], + "spans": [ + { + "bbox": [ + 166, + 111, + 425, + 124 + ], + "type": "text", + "content": "Table 3: Annotation accuracy of GPT-4 across different aspects." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 94, + 133, + 503, + 165 + ], + "lines": [ + { + "bbox": [ + 94, + 133, + 503, + 165 + ], + "spans": [ + { + "bbox": [ + 94, + 133, + 503, + 165 + ], + "type": "table", + "html": "
-Helpfulness(%)Correctness(%)Safety(%)Completeness(%)Clarity(%)
Accuracy74.587.099.681.571.5
", + "image_path": "09b2309b44962dbc846bb8791b5708aa5d5b7008228fd47a63798676ebd40c19.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 149, + 176, + 442, + 188 + ], + "lines": [ + { + "bbox": [ + 149, + 176, + 442, + 188 + ], + "spans": [ + { + "bbox": [ + 149, + 176, + 442, + 188 + ], + "type": "text", + "content": "Table 4: Accuracy of the reward model in annotating 2D reward signals." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 66, + 209, + 525, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 209, + 525, + 303 + ], + "spans": [ + { + "bbox": [ + 66, + 209, + 525, + 303 + ], + "type": "text", + "content": "As discussed in section3.1, we adopt a fine-grained scoring approach that decomposes the scalar scores of model responses into segment-level and aspect-level on the HelpSteer2 dataset. For each data point, we annotated reward scores for each segment based on multiple aspects, producing a two-dimensional reward score array. First, we used Python regular expressions to segment model responses. Special rules were applied for specific data types, such as code and tables, to ensure segmentation aligned with human reading patterns. An example of the segmented data is shown in Figure 7. Second, we employed GPT-4 to evaluate each segment across multiple aspects. Each aspect can be briefly described as follows:" + } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 305, + 525, + 451 + ], + "type": "list", + "angle": 0, + "index": 10, + "blocks": [ + { + "bbox": [ + 67, + 305, + 525, + 329 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 305, + 525, + 329 + ], + "spans": [ + { + "bbox": [ + 67, + 305, + 525, + 329 + ], + "type": "text", + "content": "1. **Helpfulness:** Assesses whether the model understands the user's query and provides a useful response." 
+ } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 332, + 525, + 357 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 332, + 525, + 357 + ], + "spans": [ + { + "bbox": [ + 67, + 332, + 525, + 357 + ], + "type": "text", + "content": "2. Correctness: Evaluates whether the response contains factual inaccuracies or hallucinations, either contradicting prior conversation context or real-world facts." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 359, + 525, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 359, + 525, + 384 + ], + "spans": [ + { + "bbox": [ + 67, + 359, + 525, + 384 + ], + "type": "text", + "content": "3. Safety: Measures the presence of harmful content, including hate speech, bullying, harassment, or inappropriate material." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 386, + 525, + 424 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 386, + 525, + 424 + ], + "spans": [ + { + "bbox": [ + 67, + 386, + 525, + 424 + ], + "type": "text", + "content": "4. Completeness: Reflects the degree to which the user's intent is fulfilled. For example, if a user asks three sub-questions, the completeness score should increase incrementally as each sub-question is answered." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 67, + 426, + 524, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 426, + 524, + 451 + ], + "spans": [ + { + "bbox": [ + 67, + 426, + 524, + 451 + ], + "type": "text", + "content": "5. Clarity: Assesses the response's clarity and conciseness. Ambiguous, confusing, or overly repetitive responses receive lower scores." 
+ } + ] + } + ], + "index": 9 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 66, + 453, + 525, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 453, + 525, + 507 + ], + "spans": [ + { + "bbox": [ + 66, + 453, + 525, + 507 + ], + "type": "text", + "content": "All aspects were rated on a 5-point Likert scale, except for safety, which used a 4-point scale (4 - Safe; 0 - Unsafe). The 5-point Likert scale was defined as follows: 0 - Strongly Disagree; 1 - Disagree; 2 - Neutral; 3 - Agree; 4 - Strongly Agree. Each score level corresponds to specific evaluation criteria detailed in the annotation prompt." + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 67, + 516, + 304, + 528 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 516, + 304, + 528 + ], + "spans": [ + { + "bbox": [ + 67, + 516, + 304, + 528 + ], + "type": "text", + "content": "C.2 Annotation Consistency with Human Labels" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 66, + 533, + 525, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 533, + 525, + 614 + ], + "spans": [ + { + "bbox": [ + 66, + 533, + 525, + 614 + ], + "type": "text", + "content": "To assess the consistency between GPT-4 annotations and human labels, we randomly sample 500 data points, each annotated by five human evaluators. The correctness of each annotation was determined using a majority voting mechanism: if more than two annotators considered GPT-4's annotation incorrect, it was marked as incorrect; otherwise, it was deemed correct. 
The accuracy results, shown in table 3, indicate that all aspects surpass " + }, + { + "bbox": [ + 66, + 533, + 525, + 614 + ], + "type": "inline_equation", + "content": "80\\%" + }, + { + "bbox": [ + 66, + 533, + 525, + 614 + ], + "type": "text", + "content": " accuracy, with the Safety aspect reaching as high as " + }, + { + "bbox": [ + 66, + 533, + 525, + 614 + ], + "type": "inline_equation", + "content": "99.4\\%" + }, + { + "bbox": [ + 66, + 533, + 525, + 614 + ], + "type": "text", + "content": ". These results suggest that GPT-4's annotation reliability is generally high." + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 67, + 623, + 194, + 634 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 623, + 194, + 634 + ], + "spans": [ + { + "bbox": [ + 67, + 623, + 194, + 634 + ], + "type": "text", + "content": "C.3 Reward Distribution" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 66, + 640, + 526, + 721 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 640, + 526, + 721 + ], + "spans": [ + { + "bbox": [ + 66, + 640, + 526, + 721 + ], + "type": "text", + "content": "In Figure 8, we present the distribution of reward scores for both preferred and dispreferred responses. The results show a clear distinction between the two distributions, with preferred responses predominantly receiving higher scores, particularly in the maximum range (4). In contrast, dispreferred responses tend to be more frequent in the lower score ranges compared to preferred responses. Figure 9 further illustrates the disparity between preferred and dispreferred responses by showcasing the distribution of minimum and maximum scores for each aspect." 
+ } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 67, + 731, + 230, + 743 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 731, + 230, + 743 + ], + "spans": [ + { + "bbox": [ + 67, + 731, + 230, + 743 + ], + "type": "text", + "content": "C.4 Reward Model Performance" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 66, + 748, + 525, + 775 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 66, + 748, + 525, + 775 + ], + "spans": [ + { + "bbox": [ + 66, + 748, + 525, + 775 + ], + "type": "text", + "content": "We trained a reward model that annotates 2D preference signals to label more data at a low cost and conduct iterative DPO training. Specifically, we use the Qwen2-72B as the backbone. The hidden state of" + } + ] + } + ], + "index": 17 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "text", + "content": "8180" + } + ] + } + ], + "index": 18 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 16 + }, + { + "para_blocks": [ + { + "type": "image", + "bbox": [ + 71, + 71, + 165, + 157 + ], + "blocks": [ + { + "bbox": [ + 71, + 71, + 165, + 157 + ], + "lines": [ + { + "bbox": [ + 71, + 71, + 165, + 157 + ], + "spans": [ + { + "bbox": [ + 71, + 71, + 165, + 157 + ], + "type": "image", + "image_path": "b765c2d0a31b890a6f5bf9daf2e5d401886a861079d02eca20fa3c3f54ec9fa3.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "image_body" + } + ], + "index": 0 + }, + { + "type": "image", + "bbox": [ + 167, + 72, + 254, + 157 + ], + "blocks": [ + { + "bbox": [ + 167, + 72, + 254, + 157 + ], + "lines": [ + { + "bbox": [ + 167, + 72, + 254, + 157 + ], + "spans": [ + { + "bbox": [ + 167, + 72, + 254, + 157 + ], + "type": "image", + "image_path": "c66cad8e5d7f431b98a112fe8e1176d9645feb036ed205d5bc2421398641fbb6.jpg" + 
} + ] + } + ], + "index": 1, + "angle": 0, + "type": "image_body" + } + ], + "index": 1 + }, + { + "type": "image", + "bbox": [ + 255, + 72, + 343, + 157 + ], + "blocks": [ + { + "bbox": [ + 255, + 72, + 343, + 157 + ], + "lines": [ + { + "bbox": [ + 255, + 72, + 343, + 157 + ], + "spans": [ + { + "bbox": [ + 255, + 72, + 343, + 157 + ], + "type": "image", + "image_path": "3b8edaf61432aa87ad48badb6f2742fef478cf46a176c9db45adf68d699b5d0b.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "image_body" + } + ], + "index": 2 + }, + { + "type": "image", + "bbox": [ + 346, + 72, + 433, + 157 + ], + "blocks": [ + { + "bbox": [ + 346, + 72, + 433, + 157 + ], + "lines": [ + { + "bbox": [ + 346, + 72, + 433, + 157 + ], + "spans": [ + { + "bbox": [ + 346, + 72, + 433, + 157 + ], + "type": "image", + "image_path": "76b050c689c7302f2161051650bf93ecf972537ce597a582ee9d4d01f9d39595.jpg" + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "image_body" + } + ], + "index": 3 + }, + { + "type": "image", + "bbox": [ + 435, + 72, + 521, + 157 + ], + "blocks": [ + { + "bbox": [ + 435, + 72, + 521, + 157 + ], + "lines": [ + { + "bbox": [ + 435, + 72, + 521, + 157 + ], + "spans": [ + { + "bbox": [ + 435, + 72, + 521, + 157 + ], + "type": "image", + "image_path": "abc17ecc767b7b20c74ddc66963390a15e1c77b27cbe71723a389c20c29912ca.jpg" + } + ] + } + ], + "index": 4, + "angle": 0, + "type": "image_body" + } + ], + "index": 4 + }, + { + "type": "image", + "bbox": [ + 72, + 158, + 164, + 248 + ], + "blocks": [ + { + "bbox": [ + 72, + 158, + 164, + 248 + ], + "lines": [ + { + "bbox": [ + 72, + 158, + 164, + 248 + ], + "spans": [ + { + "bbox": [ + 72, + 158, + 164, + 248 + ], + "type": "image", + "image_path": "e478c8e2710c1763cef4c8b94309f1849fd17b975b0a0a842cdadbe8beb44497.jpg" + } + ] + } + ], + "index": 5, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 161, + 260, + 431, + 273 + ], + "lines": [ + { + "bbox": [ + 161, + 260, + 431, + 273 + ], + "spans": [ + { + "bbox": 
[ + 161, + 260, + 431, + 273 + ], + "type": "text", + "content": "Figure 8: Statistics of reward distribution on HelpSteer-2D dataset." + } + ] + } + ], + "index": 10, + "angle": 0, + "type": "image_caption" + } + ], + "index": 5 + }, + { + "type": "image", + "bbox": [ + 167, + 158, + 254, + 248 + ], + "blocks": [ + { + "bbox": [ + 167, + 158, + 254, + 248 + ], + "lines": [ + { + "bbox": [ + 167, + 158, + 254, + 248 + ], + "spans": [ + { + "bbox": [ + 167, + 158, + 254, + 248 + ], + "type": "image", + "image_path": "4b7c000666c490443b46af89869ead318c12241f7218901193c1d7e99d369be1.jpg" + } + ] + } + ], + "index": 6, + "angle": 0, + "type": "image_body" + } + ], + "index": 6 + }, + { + "type": "image", + "bbox": [ + 256, + 158, + 343, + 248 + ], + "blocks": [ + { + "bbox": [ + 256, + 158, + 343, + 248 + ], + "lines": [ + { + "bbox": [ + 256, + 158, + 343, + 248 + ], + "spans": [ + { + "bbox": [ + 256, + 158, + 343, + 248 + ], + "type": "image", + "image_path": "c69070072b24d66c169b57c812a0a2559b94da41bbb9b47c543da5d112f679d3.jpg" + } + ] + } + ], + "index": 7, + "angle": 0, + "type": "image_body" + } + ], + "index": 7 + }, + { + "type": "image", + "bbox": [ + 346, + 158, + 433, + 248 + ], + "blocks": [ + { + "bbox": [ + 346, + 158, + 433, + 248 + ], + "lines": [ + { + "bbox": [ + 346, + 158, + 433, + 248 + ], + "spans": [ + { + "bbox": [ + 346, + 158, + 433, + 248 + ], + "type": "image", + "image_path": "2e4ac1a8b6b374c04ec122927f3966cc8ca4660121e34a7567b32e3969b7f1c7.jpg" + } + ] + } + ], + "index": 8, + "angle": 0, + "type": "image_body" + } + ], + "index": 8 + }, + { + "type": "image", + "bbox": [ + 434, + 158, + 521, + 248 + ], + "blocks": [ + { + "bbox": [ + 434, + 158, + 521, + 248 + ], + "lines": [ + { + "bbox": [ + 434, + 158, + 521, + 248 + ], + "spans": [ + { + "bbox": [ + 434, + 158, + 521, + 248 + ], + "type": "image", + "image_path": "f4b164b120330be4cb19ca5425874d098005eba1deeb0eac52a6e7a61c9becb8.jpg" + } + ] + } + ], + "index": 9, + "angle": 0, + 
"type": "image_body" + } + ], + "index": 9 + }, + { + "type": "image", + "bbox": [ + 73, + 288, + 162, + 380 + ], + "blocks": [ + { + "bbox": [ + 73, + 288, + 162, + 380 + ], + "lines": [ + { + "bbox": [ + 73, + 288, + 162, + 380 + ], + "spans": [ + { + "bbox": [ + 73, + 288, + 162, + 380 + ], + "type": "image", + "image_path": "a79d01b04b954ce167e5f84bc8e5bd0ecaca9e27a62861b967a0bb4fe4ca396d.jpg" + } + ] + } + ], + "index": 11, + "angle": 0, + "type": "image_body" + } + ], + "index": 11 + }, + { + "type": "image", + "bbox": [ + 163, + 288, + 251, + 380 + ], + "blocks": [ + { + "bbox": [ + 163, + 288, + 251, + 380 + ], + "lines": [ + { + "bbox": [ + 163, + 288, + 251, + 380 + ], + "spans": [ + { + "bbox": [ + 163, + 288, + 251, + 380 + ], + "type": "image", + "image_path": "15607b66690120c55546a2dfea891049499c1ca632a3805972a65d6857322a9d.jpg" + } + ] + } + ], + "index": 12, + "angle": 0, + "type": "image_body" + } + ], + "index": 12 + }, + { + "type": "image", + "bbox": [ + 252, + 288, + 341, + 380 + ], + "blocks": [ + { + "bbox": [ + 252, + 288, + 341, + 380 + ], + "lines": [ + { + "bbox": [ + 252, + 288, + 341, + 380 + ], + "spans": [ + { + "bbox": [ + 252, + 288, + 341, + 380 + ], + "type": "image", + "image_path": "f80b3068dd3c54eab02b1ee08d18507a3bcd124c4f384791a4ec03ca185de6ce.jpg" + } + ] + } + ], + "index": 13, + "angle": 0, + "type": "image_body" + } + ], + "index": 13 + }, + { + "type": "image", + "bbox": [ + 343, + 288, + 430, + 380 + ], + "blocks": [ + { + "bbox": [ + 343, + 288, + 430, + 380 + ], + "lines": [ + { + "bbox": [ + 343, + 288, + 430, + 380 + ], + "spans": [ + { + "bbox": [ + 343, + 288, + 430, + 380 + ], + "type": "image", + "image_path": "c4b1813f77ef85f85a3aee519b86781febd1a088f03d1f7ec8879e4dc7171240.jpg" + } + ] + } + ], + "index": 14, + "angle": 0, + "type": "image_body" + } + ], + "index": 14 + }, + { + "type": "image", + "bbox": [ + 432, + 288, + 521, + 380 + ], + "blocks": [ + { + "bbox": [ + 432, + 288, + 521, + 380 + ], + "lines": 
[ + { + "bbox": [ + 432, + 288, + 521, + 380 + ], + "spans": [ + { + "bbox": [ + 432, + 288, + 521, + 380 + ], + "type": "image", + "image_path": "2b0a8c5f5ee40e94d6b3fa314d3144a2b579032009a31e2c1ab2425978da1e3b.jpg" + } + ] + } + ], + "index": 15, + "angle": 0, + "type": "image_body" + } + ], + "index": 15 + }, + { + "type": "image", + "bbox": [ + 73, + 382, + 162, + 474 + ], + "blocks": [ + { + "bbox": [ + 73, + 382, + 162, + 474 + ], + "lines": [ + { + "bbox": [ + 73, + 382, + 162, + 474 + ], + "spans": [ + { + "bbox": [ + 73, + 382, + 162, + 474 + ], + "type": "image", + "image_path": "c265fcd2fad818e88655899434abff8adf8826887b790f6da570324ad458e522.jpg" + } + ] + } + ], + "index": 16, + "angle": 0, + "type": "image_body" + } + ], + "index": 16 + }, + { + "type": "image", + "bbox": [ + 163, + 382, + 251, + 474 + ], + "blocks": [ + { + "bbox": [ + 163, + 382, + 251, + 474 + ], + "lines": [ + { + "bbox": [ + 163, + 382, + 251, + 474 + ], + "spans": [ + { + "bbox": [ + 163, + 382, + 251, + 474 + ], + "type": "image", + "image_path": "98896c272f62cd8fd0a94a4e983e091ae1ab966aab5619516094daffbdd50605.jpg" + } + ] + } + ], + "index": 17, + "angle": 0, + "type": "image_body" + } + ], + "index": 17 + }, + { + "type": "image", + "bbox": [ + 252, + 382, + 341, + 474 + ], + "blocks": [ + { + "bbox": [ + 252, + 382, + 341, + 474 + ], + "lines": [ + { + "bbox": [ + 252, + 382, + 341, + 474 + ], + "spans": [ + { + "bbox": [ + 252, + 382, + 341, + 474 + ], + "type": "image", + "image_path": "19099d125dec5ee2cd80d457643db34d5ddbbe55bdb4c6f95914edf7c305dd43.jpg" + } + ] + } + ], + "index": 18, + "angle": 0, + "type": "image_body" + } + ], + "index": 18 + }, + { + "type": "image", + "bbox": [ + 343, + 382, + 430, + 474 + ], + "blocks": [ + { + "bbox": [ + 343, + 382, + 430, + 474 + ], + "lines": [ + { + "bbox": [ + 343, + 382, + 430, + 474 + ], + "spans": [ + { + "bbox": [ + 343, + 382, + 430, + 474 + ], + "type": "image", + "image_path": 
"69a2b2b3fdb9f3f626456d8668819d21a47a957db8845afb813fe4b011667826.jpg" + } + ] + } + ], + "index": 19, + "angle": 0, + "type": "image_body" + } + ], + "index": 19 + }, + { + "type": "image", + "bbox": [ + 432, + 382, + 521, + 474 + ], + "blocks": [ + { + "bbox": [ + 432, + 382, + 521, + 474 + ], + "lines": [ + { + "bbox": [ + 432, + 382, + 521, + 474 + ], + "spans": [ + { + "bbox": [ + 432, + 382, + 521, + 474 + ], + "type": "image", + "image_path": "e684b8fc7090f78f6b820dd5593789112d9ad4f5be57b32ea903a9d57491dc9d.jpg" + } + ] + } + ], + "index": 20, + "angle": 0, + "type": "image_body" + } + ], + "index": 20 + }, + { + "type": "image", + "bbox": [ + 73, + 475, + 161, + 567 + ], + "blocks": [ + { + "bbox": [ + 73, + 475, + 161, + 567 + ], + "lines": [ + { + "bbox": [ + 73, + 475, + 161, + 567 + ], + "spans": [ + { + "bbox": [ + 73, + 475, + 161, + 567 + ], + "type": "image", + "image_path": "7598841bd96538436da002f2e98e11e44f2bf79c44fa770570c9e01b13437aaa.jpg" + } + ] + } + ], + "index": 21, + "angle": 0, + "type": "image_body" + } + ], + "index": 21 + }, + { + "type": "image", + "bbox": [ + 163, + 475, + 251, + 567 + ], + "blocks": [ + { + "bbox": [ + 163, + 475, + 251, + 567 + ], + "lines": [ + { + "bbox": [ + 163, + 475, + 251, + 567 + ], + "spans": [ + { + "bbox": [ + 163, + 475, + 251, + 567 + ], + "type": "image", + "image_path": "0921bac48c166825ff771440ae2c75f582cbf46dda2d4a448ec74c318d3e7764.jpg" + } + ] + } + ], + "index": 22, + "angle": 0, + "type": "image_body" + } + ], + "index": 22 + }, + { + "type": "image", + "bbox": [ + 252, + 475, + 341, + 567 + ], + "blocks": [ + { + "bbox": [ + 252, + 475, + 341, + 567 + ], + "lines": [ + { + "bbox": [ + 252, + 475, + 341, + 567 + ], + "spans": [ + { + "bbox": [ + 252, + 475, + 341, + 567 + ], + "type": "image", + "image_path": "6bdcf46ae132c26004dcb84020a0098c34a245b23f726aca3d229f4aec9b5a8d.jpg" + } + ] + } + ], + "index": 23, + "angle": 0, + "type": "image_body" + } + ], + "index": 23 + }, + { + "type": 
"image", + "bbox": [ + 343, + 475, + 430, + 567 + ], + "blocks": [ + { + "bbox": [ + 343, + 475, + 430, + 567 + ], + "lines": [ + { + "bbox": [ + 343, + 475, + 430, + 567 + ], + "spans": [ + { + "bbox": [ + 343, + 475, + 430, + 567 + ], + "type": "image", + "image_path": "27e97b5b01ff026352ee23f12228424ac77a4e38ab5482788ef83ee45a55ab94.jpg" + } + ] + } + ], + "index": 24, + "angle": 0, + "type": "image_body" + } + ], + "index": 24 + }, + { + "type": "image", + "bbox": [ + 432, + 475, + 521, + 567 + ], + "blocks": [ + { + "bbox": [ + 432, + 475, + 521, + 567 + ], + "lines": [ + { + "bbox": [ + 432, + 475, + 521, + 567 + ], + "spans": [ + { + "bbox": [ + 432, + 475, + 521, + 567 + ], + "type": "image", + "image_path": "9aaa8811135fd2e039bbc6ef6058706203e8bdf3801d05b0c6eabbfabe7e8d7a.jpg" + } + ] + } + ], + "index": 25, + "angle": 0, + "type": "image_body" + } + ], + "index": 25 + }, + { + "type": "image", + "bbox": [ + 73, + 569, + 161, + 661 + ], + "blocks": [ + { + "bbox": [ + 73, + 569, + 161, + 661 + ], + "lines": [ + { + "bbox": [ + 73, + 569, + 161, + 661 + ], + "spans": [ + { + "bbox": [ + 73, + 569, + 161, + 661 + ], + "type": "image", + "image_path": "222c377c0c4a03037ee4d6db611d0f68b8f0e07717f7eb31c7dd6e5a53e98b5e.jpg" + } + ] + } + ], + "index": 26, + "angle": 0, + "type": "image_body" + }, + { + "bbox": [ + 77, + 672, + 515, + 685 + ], + "lines": [ + { + "bbox": [ + 77, + 672, + 515, + 685 + ], + "spans": [ + { + "bbox": [ + 77, + 672, + 515, + 685 + ], + "type": "text", + "content": "Figure 9: Statistics of the distribution of the highest/lowest values of each response on HelpSteer-2D dataset." 
+ } + ] + } + ], + "index": 31, + "angle": 0, + "type": "image_caption" + } + ], + "index": 26 + }, + { + "type": "image", + "bbox": [ + 163, + 569, + 251, + 661 + ], + "blocks": [ + { + "bbox": [ + 163, + 569, + 251, + 661 + ], + "lines": [ + { + "bbox": [ + 163, + 569, + 251, + 661 + ], + "spans": [ + { + "bbox": [ + 163, + 569, + 251, + 661 + ], + "type": "image", + "image_path": "c1c418c9051920900f12cb1d7a8dcbb8efb537e42fce7e651a7cc30db112a5ca.jpg" + } + ] + } + ], + "index": 27, + "angle": 0, + "type": "image_body" + } + ], + "index": 27 + }, + { + "type": "image", + "bbox": [ + 252, + 569, + 341, + 661 + ], + "blocks": [ + { + "bbox": [ + 252, + 569, + 341, + 661 + ], + "lines": [ + { + "bbox": [ + 252, + 569, + 341, + 661 + ], + "spans": [ + { + "bbox": [ + 252, + 569, + 341, + 661 + ], + "type": "image", + "image_path": "063bae8c5c276c8c6ed409377a4f7ee51c0f2fe40e3967b32ffb27f1cb21446c.jpg" + } + ] + } + ], + "index": 28, + "angle": 0, + "type": "image_body" + } + ], + "index": 28 + }, + { + "type": "image", + "bbox": [ + 343, + 569, + 430, + 661 + ], + "blocks": [ + { + "bbox": [ + 343, + 569, + 430, + 661 + ], + "lines": [ + { + "bbox": [ + 343, + 569, + 430, + 661 + ], + "spans": [ + { + "bbox": [ + 343, + 569, + 430, + 661 + ], + "type": "image", + "image_path": "6271af355ccc42def918ef0e881e8d7ebe2784e9fba80a582907fce52eb2d7a7.jpg" + } + ] + } + ], + "index": 29, + "angle": 0, + "type": "image_body" + } + ], + "index": 29 + }, + { + "type": "image", + "bbox": [ + 432, + 569, + 521, + 661 + ], + "blocks": [ + { + "bbox": [ + 432, + 569, + 521, + 661 + ], + "lines": [ + { + "bbox": [ + 432, + 569, + 521, + 661 + ], + "spans": [ + { + "bbox": [ + 432, + 569, + 521, + 661 + ], + "type": "image", + "image_path": "322feb6b498941fe7b18a58c2cefdb11b14a9a0ca7a9d16b2ffdd4b76a960234.jpg" + } + ] + } + ], + "index": 30, + "angle": 0, + "type": "image_body" + } + ], + "index": 30 + }, + { + "bbox": [ + 67, + 708, + 526, + 775 + ], + "type": "text", + "angle": 0, + 
"lines": [ + { + "bbox": [ + 67, + 708, + 526, + 775 + ], + "spans": [ + { + "bbox": [ + 67, + 708, + 526, + 775 + ], + "type": "text", + "content": "the last layer for each segment's final token is passed through a linear layer to generate a prediction score, utilizing MSE loss for training. In table 4, we report the accuracy of the reward model. The accuracy is determined by rounding the scores assigned by the reward model and comparing them with the true labels. The high level of performance suggests that the reward model is effective in annotating 2D signals, indicating its potential as a partial substitute for manual annotation. This work lays the groundwork for" + } + ] + } + ], + "index": 32 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 308, + 791 + ], + "type": "text", + "content": "8181" + } + ] + } + ], + "index": 33 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 17 + }, + { + "para_blocks": [ + { + "bbox": [ + 68, + 71, + 221, + 84 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 68, + 71, + 221, + 84 + ], + "spans": [ + { + "bbox": [ + 68, + 71, + 221, + 84 + ], + "type": "text", + "content": "further research in online learning." + } + ] + } + ], + "index": 0 + }, + { + "type": "table", + "bbox": [ + 94, + 123, + 503, + 192 + ], + "blocks": [ + { + "bbox": [ + 68, + 94, + 264, + 107 + ], + "lines": [ + { + "bbox": [ + 68, + 94, + 264, + 107 + ], + "spans": [ + { + "bbox": [ + 68, + 94, + 264, + 107 + ], + "type": "text", + "content": "D Performance of Iterative 2D-DPO" + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "table_caption" + }, + { + "bbox": [ + 94, + 123, + 503, + 192 + ], + "lines": [ + { + "bbox": [ + 94, + 123, + 503, + 192 + ], + "spans": [ + { + "bbox": [ + 94, + 123, + 503, + 192 + ], + "type": "table", + "html": "
ModelIterationArena-HardAlpacaEval 2.0MT-Bench
WR (%)Avg. len95% CILC (%)WR (%)Avg. lenTurn 1Turn 2Avg. Score
Qwen2-7B-Instruct +2D-DPO025.10583(-2.1, 2.0)30.6828.3218628.016.617.31
128.30572(-2.0, 2.3)31.0629.3219838.066.587.32
229.80585(-1.8, 2.2)31.9329.9419728.136.547.34
330.70592(-1.7, 1.9)32.5530.4319928.206.727.46
", + "image_path": "770a070bd10051d2108616129f7706199b0fc40d33391ef074d4a765f0a527d2.jpg" + } + ] + } + ], + "index": 2, + "angle": 0, + "type": "table_body" + } + ], + "index": 2 + }, + { + "bbox": [ + 103, + 200, + 489, + 212 + ], + "lines": [ + { + "bbox": [ + 103, + 200, + 489, + 212 + ], + "spans": [ + { + "bbox": [ + 103, + 200, + 489, + 212 + ], + "type": "text", + "content": "Table 5: Experimental results of Iterative 2D-DPO using a 2D-reward model to generate scores." + } + ] + } + ], + "index": 3, + "angle": 0, + "type": "text" + }, + { + "bbox": [ + 67, + 228, + 525, + 364 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 228, + 525, + 364 + ], + "spans": [ + { + "bbox": [ + 67, + 228, + 525, + 364 + ], + "type": "text", + "content": "In addition to analyzing the accuracy of the 2D-reward model, we also utilized it to generate 2D scores for iterative training. We used a random selection of 1000 instructions from the 2D-aligned dataset as the instruction set and sampled from the model with a Temperature of 0.7 and top_p of 0.8. For each instruction, we sampled 4 responses. When selecting the chosen and rejected responses, we used weighted representative scores of 5 aspects. Specifically, for Helpfulness and Correctness, we took the average score across all segments. For Safety, we selected the minimum value. For Completeness and Clarity, we used the score from the last segment. The weights remained the same as before, with [0.3, 0.4, 0.1, 0.1, 0.1]. The response with the highest weighted score was chosen as the chosen and the lowest as the rejected, and then 2D-DPO training was performed using the same hyperparameters as in previous experiments." 
+ } + ] + } + ], + "index": 4 + }, + { + "bbox": [ + 67, + 364, + 525, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 364, + 525, + 405 + ], + "spans": [ + { + "bbox": [ + 67, + 364, + 525, + 405 + ], + "type": "text", + "content": "The experimental results are shown in Table 5, where it can be observed that as the iteration increases, the model's performance across different benchmarks tends to improve. This demonstrates the potential of the 2D-reward model in iterative and online training." + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 67, + 413, + 269, + 428 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 413, + 269, + 428 + ], + "spans": [ + { + "bbox": [ + 67, + 413, + 269, + 428 + ], + "type": "text", + "content": "E Eexamples of Controllable Training" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 67, + 436, + 526, + 544 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 436, + 526, + 544 + ], + "spans": [ + { + "bbox": [ + 67, + 436, + 526, + 544 + ], + "type": "text", + "content": "Our method can achieve different style generation by setting different aspect weights. In Figure 10, we show the answers of two models trained with different aspect weights. Response A is produced by a model with the following weights: Helpfulness: 0.3, Correctness: 0.3, Safety: 0.1, Completeness: 0.1, Clarity: 0.2, while Response B is produced with weights set to: Helpfulness: 0.3, Correctness: 0.3, Safety: 0.1, Completeness: 0.2, Clarity: 0.1. Model A, with a higher weight on clarity(0.2) and a lower weight on completeness(0.1), produces relatively more concise responses, whereas model B produces relatively more detailed responses. This flexibility in adjusting weight parameters allows for fine-tuning the model's outputs to achieve specific alignment goals." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 67, + 553, + 366, + 567 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 67, + 553, + 366, + 567 + ], + "spans": [ + { + "bbox": [ + 67, + 553, + 366, + 567 + ], + "type": "text", + "content": "F Prompt Used for Annotating 2D Fine-Grained Signals" + } + ] + } + ], + "index": 8 + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "8182" + } + ] + } + ], + "index": 9 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 18 + }, + { + "para_blocks": [ + { + "type": "table", + "bbox": [ + 70, + 106, + 523, + 709 + ], + "blocks": [ + { + "bbox": [ + 70, + 106, + 523, + 709 + ], + "lines": [ + { + "bbox": [ + 70, + 106, + 523, + 709 + ], + "spans": [ + { + "bbox": [ + 70, + 106, + 523, + 709 + ], + "type": "table", + "html": "
promptWhat are the practical gift ideas for Valentine's Day gift for girlfriend?
Response AHere are some practical Valentine's Day gift ideas:1. Personalized jewelry (necklace, bracelet); 2. A spa day or massage voucher; 3. A bouquet of her favorite flowers; 4. Perfume she loves or has been eyeing; 5. A high-quality makeup set or skincare products; 6. A romantic dinner at her favorite restaurant; 7. A weekend getaway to a cozy bed and breakfast; 8. A heartfelt love letter or poetry book; 9. A stylish handbag or wallet; 10. A subscription box tailored to her interests (books, beauty, etc.).
Response BPractical gifts for Valentine's Day can be just as thoughtful and romantic as more traditional options, especially if they cater to your girlfriend's daily needs or hobbies. Here are some practical yet heartfelt gift ideas:1. Tech Accessories:Noise-canceling headphones for work, travel, or relaxation.A high-quality laptop bag or tablet case.A smartwatch or fitness tracker if she's into health and wellness.2. Clothing and Accessories:A warm winter coat or a versatile jacket suitable for her climate.Stylish and comfortable shoes for everyday wear.Quality undergarments from a brand she loves.3. Health and Wellness:A gym membership or personal training sessions if she enjoys fitness.A yoga mat or equipment for her home workouts.A water bottle with a built-in infuser for flavored water....When choosing a practical gift, consider what will make her life easier, more enjoyable, or more fulfilling. Practical gifts can show that you pay attention to her needs and support her daily activities and passions.
promptHow does one determine their self worth?
Response ADetermining self-worth involves recognizing your inherent value as a person. It can be cultivated through self-compassion, personal growth, and focusing on internal qualities rather than external measures. Self-worth comes from within and is about valuing your self for who you are, not what you do or have.
Response BDetermining your self-worth is fundamentally about recognizing your inherent value as a person. It involves a few key steps:Firstly, self-awareness is crucial. Spend time reflecting on your personal values, your strengths, and your passions.Secondly, challenge the negative narratives you might tell yourself. Often, we are our own harshest critics. When you catch yourself thinking negatively about your abilities o r worth, pause and reframe those thoughts into something more constructive and positivethirdly, seek out positive influences. Surround yourself with people who uplift you and recognize your worth. Their encouragement can reinforce your own belief in yourself.By focusing on these aspects, you can build a solid foundation of self-worth that supporrs your mental health and overall well-being.
", + "image_path": "5426017630ed11e1277882afe95edd8fc9be5aa581cffb6cb801dd4a6f371649.jpg" + } + ] + } + ], + "index": 0, + "angle": 0, + "type": "table_body" + } + ], + "index": 0 + }, + { + "bbox": [ + 95, + 723, + 496, + 735 + ], + "lines": [ + { + "bbox": [ + 95, + 723, + 496, + 735 + ], + "spans": [ + { + "bbox": [ + 95, + 723, + 496, + 735 + ], + "type": "text", + "content": "Figure 10: Examples of different style generation by the models trained by different aspect weights." + } + ] + } + ], + "index": 1, + "angle": 0, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 309, + 791 + ], + "type": "text", + "content": "8183" + } + ] + } + ], + "index": 2 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 19 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 101, + 509, + 142 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 101, + 509, + 142 + ], + "spans": [ + { + "bbox": [ + 83, + 101, + 509, + 142 + ], + "type": "text", + "content": "Now you are an intelligent annotator, and your task is to evaluate the quality of the responses of two intelligent assistants, and evaluate them sentence by sentence on the dimension of helpfulness/understanding." 
+ } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 85, + 143, + 150, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 85, + 143, + 150, + 154 + ], + "spans": [ + { + "bbox": [ + 85, + 143, + 150, + 154 + ], + "type": "text", + "content": "Requirements:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 156, + 509, + 208 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 83, + 156, + 509, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 156, + 509, + 195 + ], + "spans": [ + { + "bbox": [ + 83, + 156, + 509, + 195 + ], + "type": "text", + "content": "1. You should evaluate the model's responses on a per-sentence basis using a 5-point Likert scale according to the already segmented sentences. The response tags the segmented sentences in the format of sentence content, where ID is the sentence's sequence number." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 196, + 484, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 196, + 484, + 208 + ], + "spans": [ + { + "bbox": [ + 83, + 196, + 484, + 208 + ], + "type": "text", + "content": "2. You need to refer to the content of the previous rounds if it's a multi-round conversation." 
+ } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 223, + 283, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 223, + 283, + 236 + ], + "spans": [ + { + "bbox": [ + 83, + 223, + 283, + 236 + ], + "type": "text", + "content": "Scores and corresponding evaluation criteria:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 237, + 509, + 263 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 237, + 509, + 263 + ], + "spans": [ + { + "bbox": [ + 83, + 237, + 509, + 263 + ], + "type": "text", + "content": "4 - The response is extremely helpful and completely aligned with the spirit of what the prompt was asking for." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 264, + 509, + 317 + ], + "type": "list", + "angle": 0, + "index": 9, + "blocks": [ + { + "bbox": [ + 83, + 264, + 509, + 290 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 264, + 509, + 290 + ], + "spans": [ + { + "bbox": [ + 83, + 264, + 509, + 290 + ], + "type": "text", + "content": "3 - The response is mostly helpful and mainly aligned with what the user was looking for, but there is still some room for improvement." + } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 291, + 509, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 291, + 509, + 317 + ], + "spans": [ + { + "bbox": [ + 83, + 291, + 509, + 317 + ], + "type": "text", + "content": "2 - The response is partially helpful but misses the overall goal of the user's query/input in some way. The response did not fully satisfy what the user was looking for." 
+ } + ] + } + ], + "index": 8 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 318, + 509, + 370 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 83, + 318, + 509, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 318, + 509, + 344 + ], + "spans": [ + { + "bbox": [ + 83, + 318, + 509, + 344 + ], + "type": "text", + "content": "1 - The response is borderline unhelpful and mostly does not capture what the user was looking for, but it is still usable and helpful in a small way." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 345, + 509, + 370 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 345, + 509, + 370 + ], + "spans": [ + { + "bbox": [ + 83, + 345, + 509, + 370 + ], + "type": "text", + "content": "0 - The response is not useful or helpful at all. The response completely missed the essence of what the user wanted." + } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 386, + 144, + 398 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 386, + 144, + 398 + ], + "spans": [ + { + "bbox": [ + 83, + 386, + 144, + 398 + ], + "type": "text", + "content": "For example:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 84, + 400, + 108, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 400, + 108, + 411 + ], + "spans": [ + { + "bbox": [ + 84, + 400, + 108, + 411 + ], + "type": "text", + "content": "user:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 83, + 426, + 227, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 426, + 227, + 439 + ], + "spans": [ + { + "bbox": [ + 83, + 426, + 227, + 439 + ], + "type": "text", + "content": "Conversation History: {history}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 83, + 454, + 265, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 454, + 265, + 466 + 
], + "spans": [ + { + "bbox": [ + 83, + 454, + 265, + 466 + ], + "type": "text", + "content": "Response 1 to be evaluated: {response1}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 83, + 481, + 265, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 481, + 265, + 493 + ], + "spans": [ + { + "bbox": [ + 83, + 481, + 265, + 493 + ], + "type": "text", + "content": "Response 2 to be evaluated: {response2}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 83, + 508, + 126, + 518 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 508, + 126, + 518 + ], + "spans": [ + { + "bbox": [ + 83, + 508, + 126, + 518 + ], + "type": "text", + "content": "assistant:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 83, + 521, + 201, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 521, + 201, + 534 + ], + "spans": [ + { + "bbox": [ + 83, + 521, + 201, + 534 + ], + "type": "text", + "content": "Response 1 Evaluation: ..." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 83, + 535, + 492, + 575 + ], + "type": "list", + "angle": 0, + "index": 23, + "blocks": [ + { + "bbox": [ + 83, + 535, + 483, + 547 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 535, + 483, + 547 + ], + "spans": [ + { + "bbox": [ + 83, + 535, + 483, + 547 + ], + "type": "inline_equation", + "content": "< 1 >" + }, + { + "bbox": [ + 83, + 535, + 483, + 547 + ], + "type": "text", + "content": " Evaluation: Completely aligned with the user's query introducing the topic effectively." 
+ } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 84, + 549, + 249, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 549, + 249, + 560 + ], + "spans": [ + { + "bbox": [ + 84, + 549, + 249, + 560 + ], + "type": "text", + "content": "Helpfulness/Understanding Rating: 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 83, + 562, + 492, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 562, + 492, + 575 + ], + "spans": [ + { + "bbox": [ + 83, + 562, + 492, + 575 + ], + "type": "inline_equation", + "content": "< 2 >" + }, + { + "bbox": [ + 83, + 562, + 492, + 575 + ], + "type": "text", + "content": " Evaluation: Straightforward lead into the listing of tools, setting up the context perfectly." + } + ] + } + ], + "index": 22 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 84, + 576, + 249, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 576, + 249, + 588 + ], + "spans": [ + { + "bbox": [ + 84, + 576, + 249, + 588 + ], + "type": "text", + "content": "Helpfulness/Understanding Rating: 4" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 84, + 590, + 113, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 590, + 113, + 600 + ], + "spans": [ + { + "bbox": [ + 84, + 590, + 113, + 600 + ], + "type": "text", + "content": "<3> ..." 
+ } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 84, + 602, + 189, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 602, + 189, + 614 + ], + "spans": [ + { + "bbox": [ + 84, + 602, + 189, + 614 + ], + "type": "text", + "content": "Response 2 Evaluation:" + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 83, + 616, + 387, + 681 + ], + "type": "list", + "angle": 0, + "index": 32, + "blocks": [ + { + "bbox": [ + 84, + 616, + 387, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 616, + 387, + 629 + ], + "spans": [ + { + "bbox": [ + 84, + 616, + 387, + 629 + ], + "type": "inline_equation", + "content": "< 1>" + }, + { + "bbox": [ + 84, + 616, + 387, + 629 + ], + "type": "text", + "content": " Evaluation: Sets a clear and focused introduction to listing tools." + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 84, + 630, + 249, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 630, + 249, + 642 + ], + "spans": [ + { + "bbox": [ + 84, + 630, + 249, + 642 + ], + "type": "text", + "content": "Helpfulness/Understanding Rating: 4" + } + ] + } + ], + "index": 28 + }, + { + "bbox": [ + 83, + 644, + 349, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 644, + 349, + 655 + ], + "spans": [ + { + "bbox": [ + 83, + 644, + 349, + 655 + ], + "type": "inline_equation", + "content": "< 2 >" + }, + { + "bbox": [ + 83, + 644, + 349, + 655 + ], + "type": "text", + "content": " Evaluation: Accurate and introduces Python effectively." 
+ } + ] + } + ], + "index": 29 + }, + { + "bbox": [ + 83, + 656, + 249, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 656, + 249, + 669 + ], + "spans": [ + { + "bbox": [ + 83, + 656, + 249, + 669 + ], + "type": "text", + "content": "Helpfulness/Understanding Rating: 4" + } + ] + } + ], + "index": 30 + }, + { + "bbox": [ + 84, + 671, + 113, + 681 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 671, + 113, + 681 + ], + "spans": [ + { + "bbox": [ + 84, + 671, + 113, + 681 + ], + "type": "text", + "content": "<3> ..." + } + ] + } + ], + "index": 31 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 683, + 511, + 696 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 683, + 511, + 696 + ], + "spans": [ + { + "bbox": [ + 83, + 683, + 511, + 696 + ], + "type": "text", + "content": "In the above example, the numbers correspond to the numbers of the sentences being evaluated." + } + ] + } + ], + "index": 33 + }, + { + "bbox": [ + 83, + 698, + 413, + 710 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 698, + 413, + 710 + ], + "spans": [ + { + "bbox": [ + 83, + 698, + 413, + 710 + ], + "type": "text", + "content": "Please follow the response format for the above example. Let's get started!" + } + ] + } + ], + "index": 34 + }, + { + "bbox": [ + 180, + 735, + 411, + 747 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 180, + 735, + 411, + 747 + ], + "spans": [ + { + "bbox": [ + 180, + 735, + 411, + 747 + ], + "type": "text", + "content": "Figure 11: The prompt for annotating helpfulness scores." 
+ } + ] + } + ], + "index": 35, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "text", + "content": "8184" + } + ] + } + ], + "index": 36 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 20 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 127, + 509, + 155 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 127, + 509, + 155 + ], + "spans": [ + { + "bbox": [ + 83, + 127, + 509, + 155 + ], + "type": "text", + "content": "Now you are an intelligent annotator, and your task is to evaluate the quality of the responses of two intelligent assistant, and evaluate them sentence by sentence on the dimension of correctness." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 84, + 157, + 150, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 157, + 150, + 168 + ], + "spans": [ + { + "bbox": [ + 84, + 157, + 150, + 168 + ], + "type": "text", + "content": "Requirements:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 169, + 509, + 222 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 83, + 169, + 509, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 169, + 509, + 208 + ], + "spans": [ + { + "bbox": [ + 83, + 169, + 509, + 208 + ], + "type": "text", + "content": "1. You should evaluate the model's responses on a per-sentence basis using a 5-point Likert scale according to the already segmented sentences. The response tags the segmented sentences in the format of sentence content, where ID is the sentence's sequence number." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 210, + 484, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 210, + 484, + 222 + ], + "spans": [ + { + "bbox": [ + 83, + 210, + 484, + 222 + ], + "type": "text", + "content": "2. You need to refer to the content of the previous rounds if it's a multi-round conversation." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 237, + 283, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 237, + 283, + 248 + ], + "spans": [ + { + "bbox": [ + 83, + 237, + 283, + 248 + ], + "type": "text", + "content": "Scores and corresponding evaluation criteria:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 250, + 509, + 399 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 83, + 250, + 509, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 250, + 509, + 276 + ], + "spans": [ + { + "bbox": [ + 83, + 250, + 509, + 276 + ], + "type": "text", + "content": "4 - The response is completely correct and accurately meets the requirements of the prompt, with no necessary details missing and no false, misleading, or hallucinated information." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 277, + 509, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 277, + 509, + 303 + ], + "spans": [ + { + "bbox": [ + 83, + 277, + 509, + 303 + ], + "type": "text", + "content": "3 - The response is mostly accurate, with only a small amount of missing information. It does not contain misleading information or hallucinations." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 305, + 509, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 305, + 509, + 343 + ], + "spans": [ + { + "bbox": [ + 83, + 305, + 509, + 343 + ], + "type": "text", + "content": "2 - The response contains both correct and incorrect information. It may miss some details, contain misleading information or slight hallucinations, but is more or less aligned with what the prompt asks for." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 345, + 509, + 384 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 345, + 509, + 384 + ], + "spans": [ + { + "bbox": [ + 83, + 345, + 509, + 384 + ], + "type": "text", + "content": "1 - The response has some correct elements but is mostly wrong or incomplete. The response may contain multiple hallucinations, false information, misleading information, or irrelevant information." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 385, + 509, + 399 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 385, + 509, + 399 + ], + "spans": [ + { + "bbox": [ + 83, + 385, + 509, + 399 + ], + "type": "text", + "content": "0 - The response is completely incorrect. All information provided is wrong, false, or hallucinated." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 413, + 143, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 413, + 143, + 425 + ], + "spans": [ + { + "bbox": [ + 83, + 413, + 143, + 425 + ], + "type": "text", + "content": "For example:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 83, + 428, + 108, + 438 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 428, + 108, + 438 + ], + "spans": [ + { + "bbox": [ + 83, + 428, + 108, + 438 + ], + "type": "text", + "content": "user:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 83, + 454, + 227, + 466 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 454, + 227, + 466 + ], + "spans": [ + { + "bbox": [ + 83, + 454, + 227, + 466 + ], + "type": "text", + "content": "Conversation History: {history}" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 83, + 481, + 265, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 481, + 265, + 493 + ], + "spans": [ + { + "bbox": [ + 83, + 481, + 265, + 493 + ], + "type": "text", + "content": "Response 1 to be evaluated: {response1}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 83, + 507, + 265, + 520 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 507, + 265, + 520 + ], + "spans": [ + { + "bbox": [ + 83, + 507, + 265, + 520 + ], + "type": "text", + "content": "Response 2 to be evaluated: {response2}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 83, + 535, + 126, + 546 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 535, + 126, + 546 + ], + "spans": [ + { + "bbox": [ + 83, + 535, + 126, + 546 + ], + "type": "text", + "content": "assistant:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 83, + 549, + 189, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 549, + 189, + 560 + ], + "spans": [ + { + "bbox": [ + 83, + 549, + 
189, + 560 + ], + "type": "text", + "content": "Response 1 Evaluation:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 83, + 561, + 452, + 574 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 561, + 452, + 574 + ], + "spans": [ + { + "bbox": [ + 83, + 561, + 452, + 574 + ], + "type": "inline_equation", + "content": "< 1>" + }, + { + "bbox": [ + 83, + 561, + 452, + 574 + ], + "type": "text", + "content": " Evaluation: The artists are correctly identified as Wiz Khalifa and Charlie Puth." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 83, + 576, + 182, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 576, + 182, + 588 + ], + "spans": [ + { + "bbox": [ + 83, + 576, + 182, + 588 + ], + "type": "text", + "content": "Correctness Rating: 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 83, + 590, + 113, + 600 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 590, + 113, + 600 + ], + "spans": [ + { + "bbox": [ + 83, + 590, + 113, + 600 + ], + "type": "text", + "content": "<2> ..." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 83, + 602, + 189, + 614 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 602, + 189, + 614 + ], + "spans": [ + { + "bbox": [ + 83, + 602, + 189, + 614 + ], + "type": "text", + "content": "Response 2 Evaluation:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 83, + 616, + 450, + 629 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 616, + 450, + 629 + ], + "spans": [ + { + "bbox": [ + 83, + 616, + 450, + 629 + ], + "type": "inline_equation", + "content": "< 1>" + }, + { + "bbox": [ + 83, + 616, + 450, + 629 + ], + "type": "text", + "content": " Evaluation: This sentence is incorrect; Taylor Swift is not involved in the song." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 83, + 630, + 182, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 630, + 182, + 642 + ], + "spans": [ + { + "bbox": [ + 83, + 630, + 182, + 642 + ], + "type": "text", + "content": "Correctness Rating: 0" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 83, + 644, + 113, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 644, + 113, + 655 + ], + "spans": [ + { + "bbox": [ + 83, + 644, + 113, + 655 + ], + "type": "text", + "content": "<2> ..." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 83, + 656, + 510, + 669 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 656, + 510, + 669 + ], + "spans": [ + { + "bbox": [ + 83, + 656, + 510, + 669 + ], + "type": "text", + "content": "In the above example, the numbers correspond to the numbers of the sentences being evaluated." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 83, + 671, + 413, + 683 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 671, + 413, + 683 + ], + "spans": [ + { + "bbox": [ + 83, + 671, + 413, + 683 + ], + "type": "text", + "content": "Please follow the response format for the above example. Let's get started!" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 181, + 708, + 411, + 720 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 181, + 708, + 411, + 720 + ], + "spans": [ + { + "bbox": [ + 181, + 708, + 411, + 720 + ], + "type": "text", + "content": "Figure 12: The prompt for annotating correctness scores." 
+ } + ] + } + ], + "index": 28, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "8185" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 21 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 148, + 509, + 188 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 148, + 509, + 188 + ], + "spans": [ + { + "bbox": [ + 83, + 148, + 509, + 188 + ], + "type": "text", + "content": "Now you are an intelligent annotator, and your task is to evaluate the quality of the responses of two intelligent assistant, and evaluate them sentence by sentence on the dimension of safety. Requirements:" + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 83, + 190, + 509, + 270 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 83, + 190, + 509, + 229 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 190, + 509, + 229 + ], + "spans": [ + { + "bbox": [ + 83, + 190, + 509, + 229 + ], + "type": "text", + "content": "1. You should evaluate the model's responses on a per-sentence basis using a 2-point Likert scale according to the already segmented sentences. The response tags the segmented sentences in the format of content, where ID is the sentence's sequence number." + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 230, + 509, + 257 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 230, + 509, + 257 + ], + "spans": [ + { + "bbox": [ + 83, + 230, + 509, + 257 + ], + "type": "text", + "content": "2. For the safety assessment of each sentence, there are only two options: pass(safe) and fail(unsafe), with 4 points and 0 points respectively." 
+ } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 257, + 484, + 270 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 257, + 484, + 270 + ], + "spans": [ + { + "bbox": [ + 83, + 257, + 484, + 270 + ], + "type": "text", + "content": "3. You need to refer to the content of the previous rounds if it's a multi-round conversation." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 284, + 283, + 296 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 284, + 283, + 296 + ], + "spans": [ + { + "bbox": [ + 83, + 284, + 283, + 296 + ], + "type": "text", + "content": "Scores and corresponding evaluation criteria:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 298, + 509, + 379 + ], + "type": "list", + "angle": 0, + "index": 8, + "blocks": [ + { + "bbox": [ + 83, + 298, + 478, + 310 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 298, + 478, + 310 + ], + "spans": [ + { + "bbox": [ + 83, + 298, + 478, + 310 + ], + "type": "text", + "content": "4 - Pass (Safe) - The response does not include any of the unsafe content described below." 
+ } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 84, + 311, + 509, + 379 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 311, + 509, + 379 + ], + "spans": [ + { + "bbox": [ + 84, + 311, + 509, + 379 + ], + "type": "text", + "content": "0 - Fail (Unsafe) - Harmful Content (response includes hate speech, bullying, harassment, sexually explicit content, or violence), illegal activities (response promotes any illegal activities or harmful actions to individuals' or groups' mental, physical, or emotional health), profanity (response contains any curse words), bias and stereotyping (response promotes and/or perpetuate biases and/or stereotypical responses on select societal groups)" + } + ] + } + ], + "index": 7 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 392, + 144, + 405 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 392, + 144, + 405 + ], + "spans": [ + { + "bbox": [ + 83, + 392, + 144, + 405 + ], + "type": "text", + "content": "For example:" + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 407, + 108, + 417 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 407, + 108, + 417 + ], + "spans": [ + { + "bbox": [ + 83, + 407, + 108, + 417 + ], + "type": "text", + "content": "user:" + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 433, + 227, + 446 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 433, + 227, + 446 + ], + "spans": [ + { + "bbox": [ + 83, + 433, + 227, + 446 + ], + "type": "text", + "content": "Conversation History: {history}" + } + ] + } + ], + "index": 11 + }, + { + "bbox": [ + 83, + 460, + 265, + 474 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 460, + 265, + 474 + ], + "spans": [ + { + "bbox": [ + 83, + 460, + 265, + 474 + ], + "type": "text", + "content": "Response 1 to be evaluated: {response1}" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 83, + 487, + 265, + 500 + ], + "type": 
"text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 487, + 265, + 500 + ], + "spans": [ + { + "bbox": [ + 83, + 487, + 265, + 500 + ], + "type": "text", + "content": "Response 2 to be evaluated: {response2}" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 83, + 515, + 126, + 526 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 515, + 126, + 526 + ], + "spans": [ + { + "bbox": [ + 83, + 515, + 126, + 526 + ], + "type": "text", + "content": "assistant:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 83, + 528, + 189, + 540 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 528, + 189, + 540 + ], + "spans": [ + { + "bbox": [ + 83, + 528, + 189, + 540 + ], + "type": "text", + "content": "Response 1 Evaluation:" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 83, + 541, + 233, + 553 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 541, + 233, + 553 + ], + "spans": [ + { + "bbox": [ + 83, + 541, + 233, + 553 + ], + "type": "inline_equation", + "content": "< 1>" + }, + { + "bbox": [ + 83, + 541, + 233, + 553 + ], + "type": "text", + "content": " Evaluation: Neutral and safe." + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 84, + 555, + 158, + 567 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 555, + 158, + 567 + ], + "spans": [ + { + "bbox": [ + 84, + 555, + 158, + 567 + ], + "type": "text", + "content": "Safety Rating: 4" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 83, + 570, + 113, + 580 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 570, + 113, + 580 + ], + "spans": [ + { + "bbox": [ + 83, + 570, + 113, + 580 + ], + "type": "text", + "content": "<2> ..." 
+ } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 83, + 582, + 189, + 594 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 582, + 189, + 594 + ], + "spans": [ + { + "bbox": [ + 83, + 582, + 189, + 594 + ], + "type": "text", + "content": "Response 2 Evaluation:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 83, + 596, + 274, + 608 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 596, + 274, + 608 + ], + "spans": [ + { + "bbox": [ + 83, + 596, + 274, + 608 + ], + "type": "inline_equation", + "content": "< 1 >" + }, + { + "bbox": [ + 83, + 596, + 274, + 608 + ], + "type": "text", + "content": " Evaluation: Neutral and safe response." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 83, + 609, + 158, + 622 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 609, + 158, + 622 + ], + "spans": [ + { + "bbox": [ + 83, + 609, + 158, + 622 + ], + "type": "text", + "content": "Safety Rating: 4" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 83, + 624, + 113, + 634 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 624, + 113, + 634 + ], + "spans": [ + { + "bbox": [ + 83, + 624, + 113, + 634 + ], + "type": "text", + "content": "<2> ..." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 83, + 636, + 511, + 649 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 636, + 511, + 649 + ], + "spans": [ + { + "bbox": [ + 83, + 636, + 511, + 649 + ], + "type": "text", + "content": "In the above example, the numbers correspond to the numbers of the sentences being evaluated." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 83, + 650, + 412, + 663 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 650, + 412, + 663 + ], + "spans": [ + { + "bbox": [ + 83, + 650, + 412, + 663 + ], + "type": "text", + "content": "Please follow the response format for the above example. Let's get started!" 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 191, + 687, + 400, + 700 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 191, + 687, + 400, + 700 + ], + "spans": [ + { + "bbox": [ + 191, + 687, + 400, + 700 + ], + "type": "text", + "content": "Figure 13: The prompt for annotating safety scores." + } + ] + } + ], + "index": 25, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 781, + 310, + 791 + ], + "type": "text", + "content": "8186" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 22 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 115, + 509, + 141 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 115, + 509, + 141 + ], + "spans": [ + { + "bbox": [ + 83, + 115, + 509, + 141 + ], + "type": "text", + "content": "Now you are an intelligent annotator, and your task is to evaluate the quality of the responses of two intelligent assistant, and evaluate them sentence by sentence on the dimension of completeness." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 84, + 142, + 150, + 154 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 142, + 150, + 154 + ], + "spans": [ + { + "bbox": [ + 84, + 142, + 150, + 154 + ], + "type": "text", + "content": "Requirements:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 156, + 509, + 250 + ], + "type": "list", + "angle": 0, + "index": 5, + "blocks": [ + { + "bbox": [ + 83, + 156, + 509, + 195 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 156, + 509, + 195 + ], + "spans": [ + { + "bbox": [ + 83, + 156, + 509, + 195 + ], + "type": "text", + "content": "1. You should evaluate the model's responses on a per-sentence basis using a 5-point Likert scale according to the already segmented sentences. 
The response tags the segmented sentences in the format of content, where ID is the sentence's sequence number." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 196, + 509, + 236 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 196, + 509, + 236 + ], + "spans": [ + { + "bbox": [ + 83, + 196, + 509, + 236 + ], + "type": "text", + "content": "2. The completeness rating of a response should reflect the progress made in task completion or content fulfillment. This means that as a response becomes more complete (by sentence), its completeness rating should either increase or stay the same, never decrease." + } + ] + } + ], + "index": 3 + }, + { + "bbox": [ + 83, + 237, + 484, + 250 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 237, + 484, + 250 + ], + "spans": [ + { + "bbox": [ + 83, + 237, + 484, + 250 + ], + "type": "text", + "content": "3. You need to refer to the content of the previous rounds if it's a multi-round conversation." + } + ] + } + ], + "index": 4 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 264, + 284, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 264, + 284, + 276 + ], + "spans": [ + { + "bbox": [ + 83, + 264, + 284, + 276 + ], + "type": "text", + "content": "Scores and corresponding evaluation criteria:" + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 277, + 509, + 467 + ], + "type": "list", + "angle": 0, + "index": 12, + "blocks": [ + { + "bbox": [ + 83, + 277, + 509, + 317 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 277, + 509, + 317 + ], + "spans": [ + { + "bbox": [ + 83, + 277, + 509, + 317 + ], + "type": "text", + "content": "4 - Up to this sentence, the provided response is very complete, without missing any necessary details. If the prompt asked the assistant to perform a task, the task has been fully completed and resolved in the response." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 318, + 509, + 344 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 318, + 509, + 344 + ], + "spans": [ + { + "bbox": [ + 83, + 318, + 509, + 344 + ], + "type": "text", + "content": "3 - Up to this sentence, the provided response is nearly complete. If the prompt asked the assistant to perform a task, the task has been mostly successfully completed." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 344, + 509, + 386 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 344, + 509, + 386 + ], + "spans": [ + { + "bbox": [ + 83, + 344, + 509, + 386 + ], + "type": "text", + "content": "2 - Up to this sentence, the provided response contains about half of the content but may still lack certain details. If the prompt asked the assistant to perform a task, the task has been attempted with moderate success but still has significant room for improvement." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 386, + 509, + 425 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 386, + 509, + 425 + ], + "spans": [ + { + "bbox": [ + 83, + 386, + 509, + 425 + ], + "type": "text", + "content": "1 - Up to this sentence, the provided response contains only a small amount of relevant content and is mostly incomplete. If the prompt asked the assistant to perform a task, the task has been attempted with low success." + } + ] + } + ], + "index": 10 + }, + { + "bbox": [ + 83, + 426, + 509, + 467 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 426, + 509, + 467 + ], + "spans": [ + { + "bbox": [ + 83, + 426, + 509, + 467 + ], + "type": "text", + "content": "0 - Up to this sentence, the content of the response is completely unrelated to the prompt. If the prompt asked the assistant to perform a task, the task was either not attempted at all, or an incorrect task was attempted in the response." 
+ } + ] + } + ], + "index": 11 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 481, + 143, + 493 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 481, + 143, + 493 + ], + "spans": [ + { + "bbox": [ + 83, + 481, + 143, + 493 + ], + "type": "text", + "content": "For example:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 83, + 496, + 108, + 506 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 496, + 108, + 506 + ], + "spans": [ + { + "bbox": [ + 83, + 496, + 108, + 506 + ], + "type": "text", + "content": "user:" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 83, + 521, + 227, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 521, + 227, + 534 + ], + "spans": [ + { + "bbox": [ + 83, + 521, + 227, + 534 + ], + "type": "text", + "content": "Conversation History: {history}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 83, + 548, + 265, + 561 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 548, + 265, + 561 + ], + "spans": [ + { + "bbox": [ + 83, + 548, + 265, + 561 + ], + "type": "text", + "content": "Response 1 to be evaluated: {response1}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 83, + 576, + 265, + 588 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 576, + 265, + 588 + ], + "spans": [ + { + "bbox": [ + 83, + 576, + 265, + 588 + ], + "type": "text", + "content": "Response 2 to be evaluated: {response2}" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 83, + 602, + 126, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 602, + 126, + 613 + ], + "spans": [ + { + "bbox": [ + 83, + 602, + 126, + 613 + ], + "type": "text", + "content": "assistant: assistant:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 83, + 616, + 189, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 616, + 189, + 628 + ], + "spans": [ + { + "bbox": [ + 
83, + 616, + 189, + 628 + ], + "type": "text", + "content": "Response 1 Evaluation:" + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 83, + 629, + 410, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 629, + 410, + 642 + ], + "spans": [ + { + "bbox": [ + 83, + 629, + 410, + 642 + ], + "type": "inline_equation", + "content": "< 1>" + }, + { + "bbox": [ + 83, + 629, + 410, + 642 + ], + "type": "text", + "content": " Evaluation: Provides a comprehensive definition of machine learning." + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 83, + 643, + 190, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 643, + 190, + 655 + ], + "spans": [ + { + "bbox": [ + 83, + 643, + 190, + 655 + ], + "type": "text", + "content": "Completeness Rating: 1" + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 83, + 657, + 113, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 657, + 113, + 667 + ], + "spans": [ + { + "bbox": [ + 83, + 657, + 113, + 667 + ], + "type": "text", + "content": "<2> ..." + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 83, + 670, + 510, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 670, + 510, + 682 + ], + "spans": [ + { + "bbox": [ + 83, + 670, + 510, + 682 + ], + "type": "text", + "content": "In the above example, the numbers correspond to the numbers of the sentences being evaluated." + } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 83, + 683, + 413, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 683, + 413, + 697 + ], + "spans": [ + { + "bbox": [ + 83, + 683, + 413, + 697 + ], + "type": "text", + "content": "Please follow the response format for the above example. Let's get started!" 
+ } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 176, + 722, + 416, + 734 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 176, + 722, + 416, + 734 + ], + "spans": [ + { + "bbox": [ + 176, + 722, + 416, + 734 + ], + "type": "text", + "content": "Figure 14: The prompt for annotating completeness scores." + } + ] + } + ], + "index": 25, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "8187" + } + ] + } + ], + "index": 26 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 23 + }, + { + "para_blocks": [ + { + "bbox": [ + 83, + 115, + 509, + 154 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 115, + 509, + 154 + ], + "spans": [ + { + "bbox": [ + 83, + 115, + 509, + 154 + ], + "type": "text", + "content": "Now you are an intelligent annotator, and your task is to evaluate the quality of the responses of two intelligent assistants, and evaluate them sentence by sentence on the dimension of clarity/conciseness." + } + ] + } + ], + "index": 0 + }, + { + "bbox": [ + 84, + 156, + 150, + 168 + ], + "type": "title", + "angle": 0, + "lines": [ + { + "bbox": [ + 84, + 156, + 150, + 168 + ], + "spans": [ + { + "bbox": [ + 84, + 156, + 150, + 168 + ], + "type": "text", + "content": "Requirements:" + } + ] + } + ], + "index": 1 + }, + { + "bbox": [ + 83, + 169, + 509, + 222 + ], + "type": "list", + "angle": 0, + "index": 4, + "blocks": [ + { + "bbox": [ + 83, + 169, + 509, + 208 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 169, + 509, + 208 + ], + "spans": [ + { + "bbox": [ + 83, + 169, + 509, + 208 + ], + "type": "text", + "content": "1. You should evaluate the model's responses on a per-sentence basis using a 5-point Likert scale according to the already segmented sentences. 
The response tags the segmented sentences in the format of content, where ID is the sentence's sequence number." + } + ] + } + ], + "index": 2 + }, + { + "bbox": [ + 83, + 210, + 484, + 222 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 210, + 484, + 222 + ], + "spans": [ + { + "bbox": [ + 83, + 210, + 484, + 222 + ], + "type": "text", + "content": "2. You need to refer to the content of the previous rounds if it's a multi-round conversation." + } + ] + } + ], + "index": 3 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 237, + 283, + 248 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 237, + 283, + 248 + ], + "spans": [ + { + "bbox": [ + 83, + 237, + 283, + 248 + ], + "type": "text", + "content": "Scores and corresponding evaluation criteria:" + } + ] + } + ], + "index": 5 + }, + { + "bbox": [ + 83, + 250, + 509, + 411 + ], + "type": "list", + "angle": 0, + "index": 11, + "blocks": [ + { + "bbox": [ + 83, + 250, + 508, + 276 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 250, + 508, + 276 + ], + "spans": [ + { + "bbox": [ + 83, + 250, + 508, + 276 + ], + "type": "text", + "content": "4 - Very Clear and Concise: The response is completely clear, unambiguous, and succinct, with no redundant information, repetition or self-contradiction." + } + ] + } + ], + "index": 6 + }, + { + "bbox": [ + 83, + 277, + 509, + 303 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 277, + 509, + 303 + ], + "spans": [ + { + "bbox": [ + 83, + 277, + 509, + 303 + ], + "type": "text", + "content": "3 - Clear and Concise: The response is mostly clear and easy to understand. There might be slight ambiguities or minor redundancy but overall, it is succinct." 
+ } + ] + } + ], + "index": 7 + }, + { + "bbox": [ + 83, + 305, + 509, + 343 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 305, + 509, + 343 + ], + "spans": [ + { + "bbox": [ + 83, + 305, + 509, + 343 + ], + "type": "text", + "content": "2 - Moderately Clear or Slightly Redundant: The response is basically clear but requires extra explanation/thought, or contains some unnecessary length or repetition, or contains minor contradictions." + } + ] + } + ], + "index": 8 + }, + { + "bbox": [ + 83, + 345, + 509, + 371 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 345, + 509, + 371 + ], + "spans": [ + { + "bbox": [ + 83, + 345, + 509, + 371 + ], + "type": "text", + "content": "1 - Unclear or Redundant: The response is insufficiently clear, with obvious ambiguities, frequently requiring rephrasing, or contains considerable redundancy or repetition." + } + ] + } + ], + "index": 9 + }, + { + "bbox": [ + 83, + 372, + 509, + 411 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 372, + 509, + 411 + ], + "spans": [ + { + "bbox": [ + 83, + 372, + 509, + 411 + ], + "type": "text", + "content": "0 - Very Unclear or Very Redundant: The response is extremely vague and difficult to understand, filled with ambiguities, or excessively long with a lot of unnecessary information or repetition, or has serious/numerous contradictions." 
+ } + ] + } + ], + "index": 10 + } + ], + "sub_type": "text" + }, + { + "bbox": [ + 83, + 426, + 143, + 439 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 426, + 143, + 439 + ], + "spans": [ + { + "bbox": [ + 83, + 426, + 143, + 439 + ], + "type": "text", + "content": "For example:" + } + ] + } + ], + "index": 12 + }, + { + "bbox": [ + 83, + 441, + 108, + 451 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 441, + 108, + 451 + ], + "spans": [ + { + "bbox": [ + 83, + 441, + 108, + 451 + ], + "type": "text", + "content": "user:" + } + ] + } + ], + "index": 13 + }, + { + "bbox": [ + 83, + 466, + 227, + 480 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 466, + 227, + 480 + ], + "spans": [ + { + "bbox": [ + 83, + 466, + 227, + 480 + ], + "type": "text", + "content": "Conversation History: {history}" + } + ] + } + ], + "index": 14 + }, + { + "bbox": [ + 83, + 494, + 265, + 507 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 494, + 265, + 507 + ], + "spans": [ + { + "bbox": [ + 83, + 494, + 265, + 507 + ], + "type": "text", + "content": "Response 1 to be evaluated: {response1}" + } + ] + } + ], + "index": 15 + }, + { + "bbox": [ + 83, + 521, + 265, + 534 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 521, + 265, + 534 + ], + "spans": [ + { + "bbox": [ + 83, + 521, + 265, + 534 + ], + "type": "text", + "content": "Response 2 to be evaluated: {response2}" + } + ] + } + ], + "index": 16 + }, + { + "bbox": [ + 83, + 549, + 126, + 560 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 549, + 126, + 560 + ], + "spans": [ + { + "bbox": [ + 83, + 549, + 126, + 560 + ], + "type": "text", + "content": "assistant:" + } + ] + } + ], + "index": 17 + }, + { + "bbox": [ + 83, + 562, + 189, + 575 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 562, + 189, + 575 + ], + "spans": [ + { + "bbox": [ + 83, + 562, + 
189, + 575 + ], + "type": "text", + "content": "Response 1 Evaluation:" + } + ] + } + ], + "index": 18 + }, + { + "bbox": [ + 83, + 576, + 282, + 587 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 576, + 282, + 587 + ], + "spans": [ + { + "bbox": [ + 83, + 576, + 282, + 587 + ], + "type": "inline_equation", + "content": "< 1>" + }, + { + "bbox": [ + 83, + 576, + 282, + 587 + ], + "type": "text", + "content": " Evaluation: Clear introduction, succinct." + } + ] + } + ], + "index": 19 + }, + { + "bbox": [ + 83, + 589, + 217, + 602 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 589, + 217, + 602 + ], + "spans": [ + { + "bbox": [ + 83, + 589, + 217, + 602 + ], + "type": "text", + "content": "Clarity/Conciseness Rating: 4" + } + ] + } + ], + "index": 20 + }, + { + "bbox": [ + 83, + 603, + 113, + 613 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 603, + 113, + 613 + ], + "spans": [ + { + "bbox": [ + 83, + 603, + 113, + 613 + ], + "type": "text", + "content": "<2> ..." + } + ] + } + ], + "index": 21 + }, + { + "bbox": [ + 83, + 616, + 189, + 628 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 616, + 189, + 628 + ], + "spans": [ + { + "bbox": [ + 83, + 616, + 189, + 628 + ], + "type": "text", + "content": "Response 2 Evaluation:" + } + ] + } + ], + "index": 22 + }, + { + "bbox": [ + 83, + 629, + 265, + 642 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 629, + 265, + 642 + ], + "spans": [ + { + "bbox": [ + 83, + 629, + 265, + 642 + ], + "type": "inline_equation", + "content": "< 1 >" + }, + { + "bbox": [ + 83, + 629, + 265, + 642 + ], + "type": "text", + "content": " Evaluation: Clear but slightly vague." 
+ } + ] + } + ], + "index": 23 + }, + { + "bbox": [ + 83, + 643, + 217, + 655 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 643, + 217, + 655 + ], + "spans": [ + { + "bbox": [ + 83, + 643, + 217, + 655 + ], + "type": "text", + "content": "Clarity/Conciseness Rating: 3" + } + ] + } + ], + "index": 24 + }, + { + "bbox": [ + 83, + 657, + 113, + 667 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 657, + 113, + 667 + ], + "spans": [ + { + "bbox": [ + 83, + 657, + 113, + 667 + ], + "type": "text", + "content": "<2> ..." + } + ] + } + ], + "index": 25 + }, + { + "bbox": [ + 83, + 670, + 510, + 682 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 670, + 510, + 682 + ], + "spans": [ + { + "bbox": [ + 83, + 670, + 510, + 682 + ], + "type": "text", + "content": "In the above example, the numbers correspond to the numbers of the sentences being evaluated." + } + ] + } + ], + "index": 26 + }, + { + "bbox": [ + 83, + 684, + 413, + 697 + ], + "type": "text", + "angle": 0, + "lines": [ + { + "bbox": [ + 83, + 684, + 413, + 697 + ], + "spans": [ + { + "bbox": [ + 83, + 684, + 413, + 697 + ], + "type": "text", + "content": "Please follow the response format for the above example. Let's get started!" + } + ] + } + ], + "index": 27 + }, + { + "bbox": [ + 190, + 721, + 401, + 734 + ], + "angle": 0, + "lines": [ + { + "bbox": [ + 190, + 721, + 401, + 734 + ], + "spans": [ + { + "bbox": [ + 190, + 721, + 401, + 734 + ], + "type": "text", + "content": "Figure 15: The prompt for annotating clarity scores." 
+ } + ] + } + ], + "index": 28, + "type": "text" + } + ], + "discarded_blocks": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "page_number", + "angle": 0, + "lines": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "spans": [ + { + "bbox": [ + 286, + 780, + 309, + 791 + ], + "type": "text", + "content": "8188" + } + ] + } + ], + "index": 29 + } + ], + "page_size": [ + 595, + 841 + ], + "page_idx": 24 + } + ], + "_backend": "vlm", + "_version_name": "2.6.4" +} \ No newline at end of file